author     Todd Willey <todd@ansolabs.com>    2011-01-25 09:00:56 -0800
committer  Todd Willey <todd@ansolabs.com>    2011-01-25 09:00:56 -0800
commit     a964fc3a8efad33b0dbb94e8a128c512a248f7f1 (patch)
tree       1d40ee63f45cc8fae3f089621e8e64bf95a3d71e /nova/virt
parent     f02c9e781bdddd609601da81b97a438b6d5b9781 (diff)
parent     07f39806f3b82d5d06371758e3efe597a47434ed (diff)
Merge trunk.
Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/hyperv.py               85
-rw-r--r--  nova/virt/images.py                2
-rw-r--r--  nova/virt/libvirt_conn.py         42
-rw-r--r--  nova/virt/xenapi/fake.py           7
-rw-r--r--  nova/virt/xenapi/vm_utils.py      91
-rw-r--r--  nova/virt/xenapi/vmops.py         81
-rw-r--r--  nova/virt/xenapi/volume_utils.py  18
-rw-r--r--  nova/virt/xenapi/volumeops.py     25
-rw-r--r--  nova/virt/xenapi_conn.py          21
9 files changed, 225 insertions(+), 147 deletions(-)
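
The bulk of this merge is one mechanical cleanup: positional logging arguments are replaced with named %(var)s placeholders rendered via % locals(), so gettext translators can reorder substitutions without breaking the format call. A minimal sketch of the before/after shapes, assuming only the stdlib (names here are illustrative, not from the tree):

    import logging
    from gettext import gettext as _

    LOG = logging.getLogger("nova.virt.example")

    def announce(instance_name, vm_ref):
        # Old shape: positional arguments; a translation that needs to
        # swap the two values has no way to express the reordering.
        LOG.debug(_("Created VM %s as %s."), instance_name, vm_ref)
        # New shape used throughout this merge: placeholders are keyed
        # by local variable names and expanded eagerly with locals().
        LOG.debug(_("Created VM %(instance_name)s as %(vm_ref)s.")
                  % locals())

One trade-off worth noting: the % locals() form formats the string even when the debug level is disabled, whereas the positional form defers formatting to the logging framework.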
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 30dc1c79b..5facb7aff 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -129,7 +129,7 @@ class HyperVConnection(object):
vm = self._lookup(instance.name)
if vm is not None:
raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -159,7 +159,7 @@ class HyperVConnection(object):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = instance['name']
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
- [], None, vs_gs_data.GetText_(1))[1:]
+ [], None, vs_gs_data.GetText_(1))[1:]
if ret_val == WMI_JOB_STATUS_STARTED:
success = self._check_job_status(job)
else:
@@ -184,40 +184,40 @@ class HyperVConnection(object):
memsetting.Limit = mem
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [memsetting.GetText_(1)])
+ vm.path_(), [memsetting.GetText_(1)])
LOG.debug(_('Set memory for vm %s...'), instance.name)
procsetting = vmsetting.associators(
- wmi_result_class='Msvm_ProcessorSettingData')[0]
+ wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = vcpus
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
- vm.path_(), [procsetting.GetText_(1)])
+ vm.path_(), [procsetting.GetText_(1)])
LOG.debug(_('Set vcpus for vm %s...'), instance.name)
def _create_disk(self, vm_name, vhdfile):
"""Create a disk and attach it to the vm"""
- LOG.debug(_('Creating disk for %s by attaching disk file %s'),
- vm_name, vhdfile)
+ LOG.debug(_('Creating disk for %(vm_name)s by attaching'
+ ' disk file %(vhdfile)s') % locals())
#Find the IDE controller for the vm.
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
+ wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
- and r.Address == "0"]
+ and r.Address == "0"]
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
- "SELECT * FROM Msvm_ResourceAllocationSettingData \
- WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
- AND InstanceID LIKE '%Default%'")[0]
+ "SELECT * FROM Msvm_ResourceAllocationSettingData \
+ WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+ AND InstanceID LIKE '%Default%'")[0]
diskdrive = self._clone_wmi_obj(
- 'Msvm_ResourceAllocationSettingData', diskdflt)
+ 'Msvm_ResourceAllocationSettingData', diskdflt)
#Set the IDE ctrller as parent.
diskdrive.Parent = ctrller[0].path_()
diskdrive.Address = 0
@@ -263,17 +263,18 @@ class HyperVConnection(object):
default_nic_data = [n for n in emulatednics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._clone_wmi_obj(
- 'Msvm_EmulatedEthernetPortSettingData',
- default_nic_data[0])
+ 'Msvm_EmulatedEthernetPortSettingData',
+ default_nic_data[0])
#Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
"", extswitch.path_())
if ret_val != 0:
LOG.error(_('Failed creating a port on the external vswitch'))
raise Exception(_('Failed creating port for %s'),
- vm_name)
- LOG.debug(_("Created switch port %s on switch %s"),
- vm_name, extswitch.path_())
+ vm_name)
+ ext_path = extswitch.path_()
+ LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
+ % locals())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
@@ -283,7 +284,7 @@ class HyperVConnection(object):
new_resources = self._add_virt_resource(new_nic_data, vm)
if new_resources is None:
raise Exception(_('Failed to add nic to VM %s'),
- vm_name)
+ vm_name)
LOG.info(_("Created nic for %s "), vm_name)
def _add_virt_resource(self, res_setting_data, target_vm):
@@ -319,8 +320,10 @@ class HyperVConnection(object):
if job.JobState != WMI_JOB_STATE_COMPLETED:
LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
return False
- LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
- job.ElapsedTime)
+ desc = job.Description
+ elap = job.ElapsedTime
+ LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
+ % locals())
return True
def _find_external_network(self):
@@ -386,7 +389,9 @@ class HyperVConnection(object):
vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
for vf in vhdfile:
vf.Delete()
- LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
+ instance_name = instance.name
+ LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
+ % locals())
def get_info(self, instance_id):
"""Get information about the VM"""
@@ -402,12 +407,14 @@ class HyperVConnection(object):
summary_info = vs_man_svc.GetSummaryInformation(
[4, 100, 103, 105], settings_paths)[1]
info = summary_info[0]
- LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
- cpu_time=%s"), instance_id,
- str(HYPERV_POWER_STATE[info.EnabledState]),
- str(info.MemoryUsage),
- str(info.NumberOfProcessors),
- str(info.UpTime))
+ state = str(HYPERV_POWER_STATE[info.EnabledState])
+ memusage = str(info.MemoryUsage)
+ numprocs = str(info.NumberOfProcessors)
+ uptime = str(info.UpTime)
+
+ LOG.debug(_("Got Info for vm %(instance_id)s: state=%(state)s,"
+ " mem=%(memusage)s, num_cpu=%(numprocs)s,"
+ " cpu_time=%(uptime)s") % locals())
return {'state': HYPERV_POWER_STATE[info.EnabledState],
'max_mem': info.MemoryUsage,
@@ -441,22 +448,22 @@ class HyperVConnection(object):
#already in the state requested
success = True
if success:
- LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
- req_state)
+ LOG.info(_("Successfully changed vm state of %(vm_name)s"
+ " to %(req_state)s") % locals())
else:
- LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
- req_state)
- raise Exception(_("Failed to change vm state of %s to %s"),
- vm_name, req_state)
+ msg = _("Failed to change vm state of %(vm_name)s"
+ " to %(req_state)s") % locals()
+ LOG.error(msg)
+ raise Exception(msg)
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm' %
- instance_name)
+ raise exception.NotFound('Cannot attach volume to missing %s vm'
+ % instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s ' %
- instance_name)
+ raise exception.NotFound('Cannot detach volume from missing %s '
+ % instance_name)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index ecf0e5efb..9c987e14d 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -67,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers):
urlopened = urllib2.urlopen(request)
urlretrieve(urlopened, path)
- LOG.debug(_("Finished retreving %s -- placed in %s"), url, path)
+ LOG.debug(_("Finished retreving %(url)s -- placed in %(path)s") % locals())
def _fetch_s3_image(image, path, user, project):
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 38eddf748..9186b49ab 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -236,8 +236,9 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
- LOG.info(_('instance %s: deleting instance files %s'),
- instance['name'], target)
+ instance_name = instance['name']
+ LOG.info(_('instance %(instance_name)s: deleting instance files'
+ ' %(target)s') % locals())
if os.path.exists(target):
shutil.rmtree(target)
@@ -418,7 +419,7 @@ class LibvirtConnection(object):
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
- LOG.info(_('cool, it\'s a device'))
+ LOG.info(_("cool, it's a device"))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@@ -426,7 +427,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
- LOG.info(_('data: %r, fpath: %r'), data, fpath)
+ LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@@ -434,7 +435,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
- LOG.info(_('Contents of file %s: %r'), fpath, contents)
+ LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
return contents
@exception.wrap_exception
@@ -510,7 +511,6 @@ class LibvirtConnection(object):
base_dir = os.path.join(FLAGS.instances_path, '_base')
if not os.path.exists(base_dir):
os.mkdir(base_dir)
- os.chmod(base_dir, 0777)
base = os.path.join(base_dir, fname)
if not os.path.exists(base):
fn(target=base, *args, **kwargs)
@@ -541,7 +541,6 @@ class LibvirtConnection(object):
# ensure directories exist and are writable
utils.execute('mkdir -p %s' % basepath(suffix=''))
- utils.execute('chmod 0777 %s' % basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
@@ -623,21 +622,22 @@ class LibvirtConnection(object):
'dns': network_ref['dns'],
'ra_server': ra_server}
if key or net:
+ inst_name = inst['name']
+ img_id = inst.image_id
if key:
- LOG.info(_('instance %s: injecting key into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting key into'
+ ' image %(img_id)s') % locals())
if net:
- LOG.info(_('instance %s: injecting net into image %s'),
- inst['name'], inst.image_id)
+ LOG.info(_('instance %(inst_name)s: injecting net into'
+ ' image %(img_id)s') % locals())
try:
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
- LOG.warn(_('instance %s: ignoring error injecting data'
- ' into image %s (%s)'),
- inst['name'], inst.image_id, e)
+ LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
+ ' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))
@@ -645,9 +645,6 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
# FIXME(vish): stick this in db
@@ -732,7 +729,8 @@ class LibvirtConnection(object):
'cpu_time': cpu_time}
def get_diagnostics(self, instance_name):
- raise exception.APIError("diagnostics are not supported for libvirt")
+ raise exception.APIError(_("diagnostics are not supported "
+ "for libvirt"))
def get_disks(self, instance_name):
"""
@@ -1471,11 +1469,11 @@ class IptablesFirewallDriver(FirewallDriver):
instance['id'])
def _dhcp_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['gateway']
def _ra_server_for_instance(self, instance):
- network = db.project_get_network(context.get_admin_context(),
- instance['project_id'])
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
return network['ra_server']
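
The last two hunks (and the duplicate-lookup cleanup in to_xml) replace db.project_get_network, which assumed a single project-wide network, with a per-instance lookup. A condensed sketch of the new resolution path, using only the db calls shown above:

    from nova import context
    from nova import db

    def dhcp_server_for(instance):
        # Resolve the network from the instance row rather than from
        # the project (the premise of the removed call).
        network = db.network_get_by_instance(context.get_admin_context(),
                                             instance['id'])
        return network['gateway']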
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4bfaf4b57..e8352771c 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -69,7 +69,9 @@ LOG = logging.getLogger("nova.virt.xenapi.fake")
def log_db_contents(msg=None):
- LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+ text = msg or ""
+ content = pformat(_db_content)
+ LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
def reset():
@@ -331,7 +333,8 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
- LOG.debug(_('Calling %s %s'), name, impl)
+ localname = name
+ LOG.debug(_('Calling %(localname)s %(impl)s') % locals())
self._check_session(params)
return impl(*params)
return callit
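
The localname = name binding above is not cosmetic: % locals() can only substitute bare names visible in the local scope, so attribute lookups and closed-over values must first be bound to temporaries (the same reason the hyperv.py hunks introduce instance_name, desc, and elap). A small illustration, with a hypothetical instance object:

    def log_spawn(instance):
        # instance.name is not a key locals() can expose, so alias it
        # to a plain local before formatting.
        instance_name = instance.name
        return _("Spawning VM %(instance_name)s...") % locals()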
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index b80ff4dba..4afd28dd8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -22,6 +22,7 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
import os
import pickle
import re
+import time
import urllib
from xml.dom import minidom
@@ -133,7 +134,8 @@ class VMHelper(HelperBase):
'pae': 'true', 'viridian': 'true'}
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
- LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
+ instance_name = instance.name
+ LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
return vm_ref
@classmethod
@@ -153,10 +155,11 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
+ LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s ... ') % locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
- LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
- vdi_ref)
+ LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
+ ' VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
@@ -208,11 +211,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
- network_ref)
+ LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
- LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
- vm_ref, network_ref)
+ LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
+ ' network %(network_ref)s.') % locals())
return vif_ref
@classmethod
@@ -230,8 +233,9 @@ class VMHelper(HelperBase):
'other_config': {},
'sm_config': {},
'tags': []})
- LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref,
- name_label, virtual_size, read_only, sr_ref)
+ LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
+ ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.')
+ % locals())
return vdi_ref
@classmethod
@@ -241,7 +245,8 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
- LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
+ LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
+ % locals())
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@@ -254,8 +259,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
- LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
- vm_ref)
+ LOG.debug(_('Created snapshot %(template_vm_ref)s from'
+ ' VM %(vm_ref)s.') % locals())
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -268,8 +273,8 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
- logging.debug(_("Asking xapi to upload %s as ID %s"),
- vdi_uuids, image_id)
+ logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
+ " ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
@@ -309,7 +314,7 @@ class VMHelper(HelperBase):
meta, image_file = c.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
- LOG.debug(_("Size for image %s:%d"), image, virtual_size)
+ LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
if type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
@@ -343,7 +348,7 @@ class VMHelper(HelperBase):
def _fetch_image_objectstore(cls, session, instance_id, image, access,
secret, type):
url = images.image_url(image)
- LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
+ LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@@ -498,7 +503,8 @@ def get_vhd_parent(session, vdi_rec):
parent_uuid = vdi_rec['sm_config']['vhd-parent']
parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid)
parent_rec = session.get_xenapi().VDI.get_record(parent_ref)
- LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref)
+ vdi_uuid = vdi_rec['uuid']
+ LOG.debug(_("VHD %(vdi_uuid)s has parent %(parent_ref)s") % locals())
return parent_ref, parent_rec
else:
return None
@@ -539,16 +545,17 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
def _poll_vhds():
attempts['counter'] += 1
if attempts['counter'] > max_attempts:
- msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
- % (attempts['counter'], max_attempts))
+ counter = attempts['counter']
+ msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
+ " %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
- LOG.debug(_("Parent %s doesn't match original parent %s, "
- "waiting for coalesce..."), parent_uuid,
- original_parent_uuid)
+ LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
+ " %(original_parent_uuid)s, waiting for coalesce...")
+ % locals())
else:
# Breakout of the loop (normally) and return the parent_uuid
raise utils.LoopingCallDone(parent_uuid)
@@ -566,8 +573,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
- raise Exception(_("Unexpected number of VDIs (%s) found for "
- "VM %s") % (num_vdis, vm_ref))
+ raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
@@ -589,6 +596,27 @@ def find_sr(session):
return None
+def remap_vbd_dev(dev):
+ """Return the appropriate location for a plugged-in VBD device
+
+ Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
+ fixed in future versions:
+ https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
+
+ For now, we work around it by just doing a string replace.
+ """
+ # NOTE(sirp): This hack can go away when we pull support for Maverick
+ should_remap = FLAGS.xenapi_remap_vbd_dev
+ if not should_remap:
+ return dev
+
+ old_prefix = 'xvd'
+ new_prefix = FLAGS.xenapi_remap_vbd_dev_prefix
+ remapped_dev = dev.replace(old_prefix, new_prefix)
+
+ return remapped_dev
+
+
def with_vdi_attached_here(session, vdi, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
@@ -611,7 +639,13 @@ def with_vdi_attached_here(session, vdi, read_only, f):
LOG.debug(_('Plugging VBD %s ... '), vbd)
session.get_xenapi().VBD.plug(vbd)
LOG.debug(_('Plugging VBD %s done.'), vbd)
- return f(session.get_xenapi().VBD.get_device(vbd))
+ orig_dev = session.get_xenapi().VBD.get_device(vbd)
+ LOG.debug(_('VBD %s plugged as %s'), vbd, orig_dev)
+ dev = remap_vbd_dev(orig_dev)
+ if dev != orig_dev:
+ LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
+ 'remapping to %(dev)s') % locals())
+ return f(dev)
finally:
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
vbd_unplug_with_retry(session, vbd)
@@ -624,6 +658,7 @@ def vbd_unplug_with_retry(session, vbd):
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
should be dead."""
+ # FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
while True:
try:
session.get_xenapi().VBD.unplug(vbd)
@@ -679,8 +714,8 @@ def _write_partition(virtual_size, dev):
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
- LOG.debug(_('Writing partition table %d %d to %s...'),
- primary_first, primary_last, dest)
+ LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
+ ' to %(dest)s...') % locals())
def execute(cmd, process_input=None, check_exit_code=True):
return utils.execute(cmd=cmd,
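
remap_vbd_dev, newly added above and exercised from with_vdi_attached_here, is a plain string substitution gated by two flags defined in xenapi_conn.py further down. A worked example under the defaults shown there (prefix 'sd', remapping off unless xenapi_remap_vbd_dev is set):

    # On a Maverick guest the device surfaces as sd? instead of xvd?,
    # so the xapi-reported name is rewritten before handing it to f():
    dev = 'xvdb'
    remapped = dev.replace('xvd', 'sd')   # what remap_vbd_dev does
    assert remapped == 'sdb'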
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 6c2fd6a68..628a171fa 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -104,7 +104,9 @@ class VMOps(object):
network_ref, instance.mac_address)
LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
+ instance_name = instance.name
+ LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
+ % locals())
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@@ -196,7 +198,8 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
except self.XenAPI.Failure, exc:
- logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
+ logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
+ % locals())
return
try:
@@ -252,41 +255,71 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def destroy(self, instance):
- """Destroy VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
- return self._destroy(instance, vm, shutdown=True)
-
- def _destroy(self, instance, vm, shutdown=True):
- """ Destroy VM instance """
- if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ def _shutdown(self, instance, vm):
+ """Shutdown an instance """
+ state = self.get_info(instance['name'])['state']
+ if state == power_state.SHUTDOWN:
+ LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
+ locals())
return
- # Get the VDIs related to the VM
+
+ try:
+ task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ self._session.wait_for_task(instance.id, task)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+
+ def _destroy_vdis(self, instance, vm):
+ """Destroys all VDIs associated with a VM """
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
- if shutdown:
+
+ if not vdis:
+ return
+
+ for vdi in vdis:
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- # Disk clean-up
- if vdis:
- for vdi in vdis:
- try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
- except self.XenAPI.Failure, exc:
- LOG.exception(exc)
- # VM Destroy
+ def _destroy_vm(self, instance, vm):
+ """Destroys a VM record """
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def destroy(self, instance):
+ """
+ Destroy VM instance
+
+ This is the method exposed by xenapi_conn.destroy(). The rest of the
+ destroy_* methods are internal.
+ """
+ vm = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm, shutdown=True)
+
+ def _destroy(self, instance, vm, shutdown=True):
+ """
+ Destroys VM instance by performing:
+
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+ 3. Destroying that actual VM record
+ """
+ if vm is None:
+ # Don't complain, just return. This lets us clean up instances
+ # that have already disappeared from the underlying platform.
+ return
+
+ if shutdown:
+ self._shutdown(instance, vm)
+
+ self._destroy_vdis(instance, vm)
+ self._destroy_vm(instance, vm)
+
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
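
The destroy path is now three single-purpose helpers behind one public entry point, which makes teardown idempotent: each step catches and logs its own XenAPI.Failure, and a vanished VM short-circuits before any call is made. A usage-level sketch (conn standing in for the object returned by xenapi_conn.get_connection):

    # Hypothetical caller; safe to repeat because _destroy returns
    # immediately when the VM has already disappeared.
    conn.destroy(instance)
    conn.destroy(instance)  # second call is a quiet no-op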
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 0cd15b950..d5ebd29d5 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -71,7 +71,7 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
- LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
+ LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
return sr_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
@@ -98,20 +98,20 @@ class VolumeHelper(HelperBase):
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
- exc, sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
+ ' for %(sr_ref)s') % locals())
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
- exc, pbd)
+ LOG.warn(_('Ignoring exception %(exc)s when unplugging'
+ ' PBD %(pbd)s') % locals())
try:
session.get_xenapi().SR.forget(sr_ref)
LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
- LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
- sr_ref)
+ LOG.warn(_('Ignoring exception %(exc)s when forgetting'
+ ' SR %(sr_ref)s') % locals())
@classmethod
def introduce_vdi(cls, session, sr_ref):
@@ -172,8 +172,8 @@ class VolumeHelper(HelperBase):
(volume_id is None) or \
(target_host is None) or \
(target_iqn is None):
- raise StorageError(_('Unable to obtain target information %s, %s')
- % (device_path, mountpoint))
+ raise StorageError(_('Unable to obtain target information'
+ ' %(device_path)s, %(mountpoint)s') % locals())
volume_info = {}
volume_info['deviceNumber'] = device_number
volume_info['volumeId'] = volume_id
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 189f968c6..d89a6f995 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -48,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
- LOG.debug(_("Attach_volume: %s, %s, %s"),
- instance_name, device_path, mountpoint)
+ LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
+ " %(mountpoint)s") % locals())
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@@ -66,9 +66,8 @@ class VolumeOps(object):
except StorageError, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to create VDI on SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
vbd_ref = VMHelper.create_vbd(self._session,
@@ -78,9 +77,8 @@ class VolumeOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- raise Exception(_('Unable to use SR %s for instance %s')
- % (sr_ref,
- instance_name))
+ raise Exception(_('Unable to use SR %(sr_ref)s for'
+ ' instance %(instance_name)s') % locals())
else:
try:
task = self._session.call_xenapi('Async.VBD.plug',
@@ -92,8 +90,8 @@ class VolumeOps(object):
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
- LOG.info(_('Mountpoint %s attached to instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s attached to'
+ ' instance %(instance_name)s') % locals())
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@@ -103,7 +101,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
- LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
+ LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
+ % locals())
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
@@ -125,5 +124,5 @@ class VolumeOps(object):
LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
- LOG.info(_('Mountpoint %s detached from instance %s'),
- mountpoint, instance_name)
+ LOG.info(_('Mountpoint %(mountpoint)s detached from'
+ ' instance %(instance_name)s') % locals())
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c57c883c9..78f0d14b9 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -109,6 +109,14 @@ flags.DEFINE_string('target_port',
flags.DEFINE_string('iqn_prefix',
'iqn.2010-10.org.openstack',
'IQN Prefix')
+# NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick, when we pull
+# support for it, we should remove this
+flags.DEFINE_bool('xenapi_remap_vbd_dev', False,
+ 'Used to enable the remapping of VBD dev '
+ '(Works around an issue in Ubuntu Maverick)')
+flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd',
+ 'Specify prefix to remap VBD dev to '
+ '(ex. /dev/xvdb -> /dev/sdb)')
def get_connection(_):
@@ -290,19 +298,14 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
- LOG.info(_("Task [%s] %s status: success %s") % (
- name,
- task,
- result))
+ LOG.info(_("Task [%(name)s] %(task)s status:"
+ " success %(result)s") % locals())
done.send(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
- LOG.warn(_("Task [%s] %s status: %s %s") % (
- name,
- task,
- status,
- error_info))
+ LOG.warn(_("Task [%(name)s] %(task)s status:"
+ " %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
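
The two DEFINE_* flags above are what remap_vbd_dev in vm_utils.py consults. A hedged example of enabling the Maverick workaround in a gflags-style flagfile (the delivery mechanism is illustrative; both defaults ship conservative, with remapping disabled and prefix 'sd'):

    # e.g. in a file passed via --flagfile
    --xenapi_remap_vbd_dev=true
    --xenapi_remap_vbd_dev_prefix=sd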