Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/baremetal/pxe.py          |   2
-rwxr-xr-x  nova/virt/baremetal/tilera.py       |   4
-rwxr-xr-x  nova/virt/baremetal/tilera_pdu.py   |   2
-rw-r--r--  nova/virt/configdrive.py            |   4
-rw-r--r--  nova/virt/hyperv/vmutils.py         |   2
-rw-r--r--  nova/virt/libvirt/blockinfo.py      |   2
-rwxr-xr-x  nova/virt/libvirt/driver.py         |  86
-rw-r--r--  nova/virt/libvirt/vif.py            | 131
-rw-r--r--  nova/virt/powervm/lpar.py           |   2
-rw-r--r--  nova/virt/powervm/operator.py       |   4
-rwxr-xr-x  nova/virt/vmwareapi/driver.py       |  22
-rw-r--r--  nova/virt/vmwareapi/fake.py         |  75
-rw-r--r--  nova/virt/vmwareapi/vm_util.py      | 110
-rw-r--r--  nova/virt/vmwareapi/vmops.py        |  20
-rw-r--r--  nova/virt/xenapi/vm_utils.py        |  28
-rw-r--r--  nova/virt/xenapi/volume_utils.py    |  12
16 files changed, 435 insertions, 71 deletions
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index f44a5f87a..21fc2ce47 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -213,7 +213,7 @@ def get_tftp_image_info(instance, instance_type):
image_info['ramdisk'][0] = str(instance['ramdisk_id'])
image_info['deploy_kernel'][0] = get_deploy_aki_id(instance_type)
image_info['deploy_ramdisk'][0] = get_deploy_ari_id(instance_type)
- except KeyError as e:
+ except KeyError:
pass
missing_labels = []
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
index bb89a5f94..36127bfa2 100755
--- a/nova/virt/baremetal/tilera.py
+++ b/nova/virt/baremetal/tilera.py
@@ -106,7 +106,7 @@ def get_tftp_image_info(instance):
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
- except KeyError as e:
+ except KeyError:
pass
missing_labels = []
@@ -347,7 +347,7 @@ class Tilera(base.NodeDriver):
user_data = instance['user_data']
try:
self._iptables_set(node_ip, user_data)
- except Exception as ex:
+ except Exception:
self.deactivate_bootloader(context, node, instance)
raise exception.NovaException(_("Node is in an "
"unknown error state."))
diff --git a/nova/virt/baremetal/tilera_pdu.py b/nova/virt/baremetal/tilera_pdu.py
index 0e491168f..90f9287e4 100755
--- a/nova/virt/baremetal/tilera_pdu.py
+++ b/nova/virt/baremetal/tilera_pdu.py
@@ -109,7 +109,7 @@ class Pdu(base.PowerManager):
return CONF.baremetal.tile_pdu_off
else:
try:
- out = utils.execute(CONF.baremetal.tile_pdu_mgr,
+ utils.execute(CONF.baremetal.tile_pdu_mgr,
CONF.baremetal.tile_pdu_ip, mode)
time.sleep(CONF.baremetal.tile_power_wait)
return mode
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 173dd457b..d4038457f 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -179,7 +179,3 @@ class ConfigDriveBuilder(object):
def required_by(instance):
return instance.get('config_drive') or CONF.force_config_drive
-
-
-def enabled_for(instance):
- return required_by(instance) or instance.get('config_drive_id')
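The blockinfo.py hunk below depends on this removal: get_disk_mapping() now
calls required_by() instead of the deleted enabled_for(). A minimal sketch of
the semantic difference, assuming CONF.force_config_drive is falsy and using
hypothetical instance dicts:

def required_by(instance):
    return instance.get('config_drive')        # or CONF.force_config_drive

def enabled_for(instance):                     # removed by this commit
    return required_by(instance) or instance.get('config_drive_id')

requested = {'config_drive': True}
rebuilt = {'config_drive': None, 'config_drive_id': 'cd-123'}

assert required_by(requested) and enabled_for(requested)
# An instance that only carries a config_drive_id no longer counts:
assert not required_by(rebuilt) and enabled_for(rebuilt)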
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 680ec2d61..2cc40a1de 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -335,7 +335,7 @@ class VMUtils(object):
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
- vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
+ self._conn.Msvm_VirtualSystemManagementService()
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index aabcef964..55bf4fcd1 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -411,7 +411,7 @@ def get_disk_mapping(virt_type, instance,
'dev': disk_dev,
'type': 'disk'}
- if configdrive.enabled_for(instance):
+ if configdrive.required_by(instance):
config_info = get_next_disk_info(mapping,
disk_bus,
last_device=True)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index f4ba24cc4..f20a27900 100755
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -555,7 +555,7 @@ class LibvirtDriver(driver.ComputeDriver):
event_thread.start()
LOG.debug("Starting green dispatch thread")
- dispatch_thread = eventlet.spawn(self._dispatch_thread)
+ eventlet.spawn(self._dispatch_thread)
def init_host(self, host):
if not self.has_min_version(MIN_LIBVIRT_VERSION):
@@ -593,7 +593,7 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
- except Exception as e:
+ except Exception:
LOG.warn(_("URI %s does not support events"),
self.uri())
@@ -1116,7 +1116,7 @@ class LibvirtDriver(driver.ComputeDriver):
if state == power_state.RUNNING:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
- except libvirt.libvirtError as ex:
+ except libvirt.libvirtError:
LOG.error(_('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, (network, mapping))
@@ -1386,19 +1386,22 @@ class LibvirtDriver(driver.ComputeDriver):
state = LIBVIRT_POWER_STATE[state]
new_domid = dom.ID()
- if state in [power_state.SHUTDOWN,
- power_state.CRASHED]:
- LOG.info(_("Instance shutdown successfully."),
- instance=instance)
- self._create_domain(domain=dom)
- timer = loopingcall.FixedIntervalLoopingCall(
- self._wait_for_running, instance)
- timer.start(interval=0.5).wait()
- return True
- elif old_domid != new_domid:
- LOG.info(_("Instance may have been rebooted during soft "
- "reboot, so return now."), instance=instance)
- return True
+ # NOTE(ivoks): By checking domain IDs, we make sure we are
+ # not recreating a domain that's already running.
+ if old_domid != new_domid:
+ if state in [power_state.SHUTDOWN,
+ power_state.CRASHED]:
+ LOG.info(_("Instance shutdown successfully."),
+ instance=instance)
+ self._create_domain(domain=dom)
+ timer = loopingcall.FixedIntervalLoopingCall(
+ self._wait_for_running, instance)
+ timer.start(interval=0.5).wait()
+ return True
+ else:
+ LOG.info(_("Instance may have been rebooted during soft "
+ "reboot, so return now."), instance=instance)
+ return True
greenthread.sleep(1)
return False
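A minimal standalone sketch of the check described in the NOTE above: libvirt
assigns a fresh numeric ID each time a domain starts, so a changed ID means
the guest already came back up on its own (the constants here are stand-ins
for the power_state values):

SHUTDOWN, CRASHED, RUNNING = range(3)

def soft_reboot_action(old_domid, new_domid, state):
    if old_domid == new_domid:
        return 'wait'        # same boot as before: keep polling
    if state in (SHUTDOWN, CRASHED):
        return 'recreate'    # domain changed but is down: start it again
    return 'done'            # domain already rebooted by itself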
@@ -1431,7 +1434,8 @@ class LibvirtDriver(driver.ComputeDriver):
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
- disk_info_json = self.get_instance_disk_info(instance['name'], xml)
+ disk_info_json = self.get_instance_disk_info(instance['name'], xml,
+ block_device_info)
self._create_images_and_backing(context, instance, disk_info_json)
# Initialize all the necessary networking, block devices and
@@ -2503,10 +2507,28 @@ class LibvirtDriver(driver.ComputeDriver):
use_cow=CONF.use_cow_images)
if xml:
- domain = self._conn.defineXML(xml)
+ try:
+ domain = self._conn.defineXML(xml)
+ except Exception as e:
+ LOG.error(_("An error occurred while trying to define a domain"
+ " with xml: %s") % xml)
+ raise e
+
if power_on:
- domain.createWithFlags(launch_flags)
- self._enable_hairpin(domain.XMLDesc(0))
+ try:
+ domain.createWithFlags(launch_flags)
+ except Exception as e:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("An error occurred while trying to launch a "
+ "defined domain with xml: %s") %
+ domain.XMLDesc(0))
+
+ try:
+ self._enable_hairpin(domain.XMLDesc(0))
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("An error occurred while enabling hairpin mode on "
+ "domain with xml: %s") % domain.XMLDesc(0))
# NOTE(uni): Now the container is running with its own private mount
# namespace and so there is no need to keep the container rootfs
@@ -3054,7 +3076,6 @@ class LibvirtDriver(driver.ComputeDriver):
(disk_available_gb * 1024) - CONF.reserved_host_disk_mb
# Compare CPU
- src = instance['host']
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(source_cpu_info)
@@ -3519,7 +3540,8 @@ class LibvirtDriver(driver.ComputeDriver):
dom = self._lookup_by_name(instance["name"])
self._conn.defineXML(dom.XMLDesc(0))
- def get_instance_disk_info(self, instance_name, xml=None):
+ def get_instance_disk_info(self, instance_name, xml=None,
+ block_device_info=None):
"""Preparation block migration.
:params instance:
@@ -3552,15 +3574,27 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance_name)
+ # NOTE (rmk): When block_device_info is provided, we will use it to
+ # filter out devices which are actually volumes.
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+
+ volume_devices = set()
+ for vol in block_device_mapping:
+ disk_dev = vol['mount_device'].rpartition("/")[2]
+ volume_devices.add(disk_dev)
+
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
+ target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
+ target = target_nodes[cnt].attrib['dev']
if disk_type != 'file':
LOG.debug(_('skipping %s since it looks like volume'), path)
@@ -3571,6 +3605,11 @@ class LibvirtDriver(driver.ComputeDriver):
instance_name)
continue
+ if target in volume_devices:
+ LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a '
+ 'volume'), {'path': path, 'target': target})
+ continue
+
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
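The volume filter above keys on the trailing device name of each
mount_device; rpartition() yields the same result whether or not a /dev/
prefix is present:

for mount_device in ('/dev/vdb', 'vdc'):
    print(mount_device.rpartition('/')[2])   # 'vdb', then 'vdc'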
@@ -3679,7 +3718,8 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
- disk_info_text = self.get_instance_disk_info(instance['name'])
+ disk_info_text = self.get_instance_disk_info(instance['name'],
+ block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
# copy disks to destination
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index d0a2283e7..9b33a12fe 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -228,6 +228,36 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
mapping,
image_meta)
+ def get_config_ivs_hybrid(self, instance, network, mapping, image_meta):
+ newnet = copy.deepcopy(network)
+ newnet['bridge'] = self.get_br_name(mapping['vif_uuid'])
+ return self.get_config_bridge(instance,
+ newnet,
+ mapping,
+ image_meta)
+
+ def get_config_ivs_ethernet(self, instance, network, mapping, image_meta):
+ conf = super(LibvirtGenericVIFDriver,
+ self).get_config(instance,
+ network,
+ mapping,
+ image_meta)
+
+ dev = self.get_vif_devname(mapping)
+ designer.set_vif_host_backend_ethernet_config(conf, dev)
+
+ return conf
+
+ def get_config_ivs(self, instance, network, mapping, image_meta):
+ if self.get_firewall_required():
+ return self.get_config_ivs_hybrid(instance, network,
+ mapping,
+ image_meta)
+ else:
+ return self.get_config_ivs_ethernet(instance, network,
+ mapping,
+ image_meta)
+
def get_config_802qbg(self, instance, network, mapping, image_meta):
conf = super(LibvirtGenericVIFDriver,
self).get_config(instance,
@@ -271,8 +301,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
-
- if vif_type == network_model.VIF_TYPE_BRIDGE:
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
return self.get_config_bridge(instance,
network, mapping,
image_meta)
@@ -288,6 +317,10 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
return self.get_config_802qbh(instance,
network, mapping,
image_meta)
+ elif vif_type == network_model.VIF_TYPE_IVS:
+ return self.get_config_ivs(instance,
+ network, mapping,
+ image_meta)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
@@ -372,6 +405,51 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
else:
self.plug_ovs_ethernet(instance, vif)
+ def plug_ivs_ethernet(self, instance, vif):
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
+ network, mapping = vif
+ iface_id = self.get_ovs_interfaceid(mapping)
+ dev = self.get_vif_devname(mapping)
+ linux_net.create_tap_dev(dev)
+ linux_net.create_ivs_vif_port(dev, iface_id, mapping['mac'],
+ instance['uuid'])
+
+ def plug_ivs_hybrid(self, instance, vif):
+ """Plug using hybrid strategy (same as OVS)
+
+ Create a per-VIF linux bridge, then link that bridge to the IVS
+ integration bridge via a veth pair, setting up the IVS end of
+ the pair just like a normal IVS port. Then boot the
+ VIF on the linux bridge using standard libvirt mechanisms.
+ """
+ super(LibvirtGenericVIFDriver,
+ self).plug(instance, vif)
+
+ network, mapping = vif
+ iface_id = self.get_ovs_interfaceid(mapping)
+ br_name = self.get_br_name(mapping['vif_uuid'])
+ v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid'])
+
+ if not linux_net.device_exists(br_name):
+ utils.execute('brctl', 'addbr', br_name, run_as_root=True)
+ utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
+ utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
+
+ if not linux_net.device_exists(v2_name):
+ linux_net._create_veth_pair(v1_name, v2_name)
+ utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
+ utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
+ linux_net.create_ivs_vif_port(v2_name, iface_id, mapping['mac'],
+ instance['uuid'])
+
+ def plug_ivs(self, instance, vif):
+ if self.get_firewall_required():
+ self.plug_ivs_hybrid(instance, vif)
+ else:
+ self.plug_ivs_ethernet(instance, vif)
+
def plug_802qbg(self, instance, vif):
super(LibvirtGenericVIFDriver,
self).plug(instance, vif)
@@ -393,8 +471,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
-
- if vif_type == network_model.VIF_TYPE_BRIDGE:
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
self.plug_bridge(instance, vif)
elif vif_type == network_model.VIF_TYPE_OVS:
self.plug_ovs(instance, vif)
@@ -402,6 +479,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
self.plug_802qbg(instance, vif)
elif vif_type == network_model.VIF_TYPE_802_QBH:
self.plug_802qbh(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_IVS:
+ self.plug_ivs(instance, vif)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
@@ -460,6 +539,45 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
else:
self.unplug_ovs_ethernet(instance, vif)
+ def unplug_ivs_ethernet(self, instance, vif):
+ """Unplug the VIF by deleting the port from the bridge."""
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ try:
+ network, mapping = vif
+ linux_net.delete_ivs_vif_port(self.get_vif_devname(mapping))
+ except exception.ProcessExecutionError:
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
+
+ def unplug_ivs_hybrid(self, instance, vif):
+ """Unplug using hybrid strategy (same as OVS)
+
+ Unhook port from IVS, unhook port from bridge, delete
+ bridge, and delete both veth devices.
+ """
+ super(LibvirtGenericVIFDriver,
+ self).unplug(instance, vif)
+
+ try:
+ network, mapping = vif
+ br_name = self.get_br_name(mapping['vif_uuid'])
+ v1_name, v2_name = self.get_veth_pair_names(mapping['vif_uuid'])
+
+ utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
+ utils.execute('ip', 'link', 'set', br_name, 'down',
+ run_as_root=True)
+ utils.execute('brctl', 'delbr', br_name, run_as_root=True)
+ linux_net.delete_ivs_vif_port(v2_name)
+ except exception.ProcessExecutionError:
+ LOG.exception(_("Failed while unplugging vif"), instance=instance)
+
+ def unplug_ivs(self, instance, vif):
+ if self.get_firewall_required():
+ self.unplug_ivs_hybrid(instance, vif)
+ else:
+ self.unplug_ivs_ethernet(instance, vif)
+
def unplug_802qbg(self, instance, vif):
super(LibvirtGenericVIFDriver,
self).unplug(instance, vif)
@@ -481,8 +599,7 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
-
- if vif_type == network_model.VIF_TYPE_BRIDGE:
+ elif vif_type == network_model.VIF_TYPE_BRIDGE:
self.unplug_bridge(instance, vif)
elif vif_type == network_model.VIF_TYPE_OVS:
self.unplug_ovs(instance, vif)
@@ -490,6 +607,8 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
self.unplug_802qbg(instance, vif)
elif vif_type == network_model.VIF_TYPE_802_QBH:
self.unplug_802qbh(instance, vif)
+ elif vif_type == network_model.VIF_TYPE_IVS:
+ self.unplug_ivs(instance, vif)
else:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
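For orientation, a sketch of the host-side wiring that plug_ivs_hybrid()
builds. Device names are derived from the VIF UUID by get_br_name() and
get_veth_pair_names(); the layout below is illustrative, not output from
the driver:

#   instance tap
#        |
#   per-VIF linux bridge    <- iptables firewall rules attach here
#        |
#   veth v1 ====== veth v2  <- v2 becomes an IVS port
#                     |
#          IVS integration bridge (create_ivs_vif_port)

def get_firewall_required_sketch(firewall_driver_conf):
    """Hedged sketch of get_firewall_required(): hybrid wiring is only
    needed when a non-noop firewall driver must see the VIF on a linux
    bridge, since IVS (like OVS) cannot apply per-port iptables rules."""
    return not firewall_driver_conf.endswith('NoopFirewallDriver')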
diff --git a/nova/virt/powervm/lpar.py b/nova/virt/powervm/lpar.py
index 7be8b046a..a6c782692 100644
--- a/nova/virt/powervm/lpar.py
+++ b/nova/virt/powervm/lpar.py
@@ -51,7 +51,7 @@ def load_from_conf_data(conf_data):
for (key, value) in attribs.items():
try:
lpar[key] = value
- except exception.PowerVMLPARAttributeNotFound as e:
+ except exception.PowerVMLPARAttributeNotFound:
LOG.info(_('Encountered unknown LPAR attribute: %s\n'
'Continuing without storing') % key)
return lpar
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index 18cba0ba2..fffb77fc9 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -766,11 +766,11 @@ class BaseOperator(object):
def _decompress_image_file(self, file_path, outfile_path):
command = "/usr/bin/gunzip -c %s > %s" % (file_path, outfile_path)
- output = self.run_vios_command_as_root(command)
+ self.run_vios_command_as_root(command)
# Remove compressed image file
command = "/usr/bin/rm %s" % file_path
- output = self.run_vios_command_as_root(command)
+ self.run_vios_command_as_root(command)
return outfile_path
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 1f91e5ab2..8389b2c3d 100755
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -127,6 +127,12 @@ class Failure(Exception):
class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
+ # VMwareAPI has both ESXi and vCenter API sets.
+ # The ESXi API is nearly a proper subset of the vCenter
+ # API: almost every valid ESXi call is also a valid
+ # vCenter call, with some small edge-case exceptions
+ # regarding VNC, CIM, user management and SSO.
+
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareESXDriver, self).__init__(virtapi)
@@ -338,6 +344,14 @@ class VMwareESXDriver(driver.ComputeDriver):
class VMwareVCDriver(VMwareESXDriver):
"""The vCenter host connection object."""
+ # The vCenter driver includes several additional VMware vSphere
+ # capabilities, including APIs that act on hosts or on groups of
+ # hosts in clusters or in non-cluster logical groupings.
+ #
+ # vCenter is not a hypervisor itself; it manages multiple
+ # hypervisor host machines and their guests. This fact can
+ # subtly alter how vSphere and OpenStack interoperate.
+
def __init__(self, virtapi, read_only=False, scheme="https"):
super(VMwareVCDriver, self).__init__(virtapi)
self._cluster_name = CONF.vmwareapi_cluster_name
@@ -399,6 +413,14 @@ class VMwareVCDriver(VMwareESXDriver):
post_method, recover_method,
block_migration)
+ def get_vnc_console(self, instance):
+ """Return link to instance's VNC console using vCenter logic."""
+ # In this situation, ESXi and vCenter require different
+ # API logic to create a valid VNC console connection object.
+ # Specifically, vCenter does not actually run the VNC service
+ # itself; you must talk to the ESX host underneath vCenter.
+ return self._vmops.get_vnc_console_vcenter(instance)
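A hedged sketch (not the nova classes themselves) of the dispatch this
override creates: the ESX driver can answer VNC queries with the endpoint
it already talks to, while the vCenter driver must first resolve the ESX
host currently running the VM:

class EsxConsoleSketch(object):
    def __init__(self, host_ip):
        self.host_ip = host_ip

    def get_vnc_console(self, instance):
        # ESX runs the VNC service itself.
        return {'host': self.host_ip, 'port': 5901,
                'internal_access_path': None}

class VcConsoleSketch(EsxConsoleSketch):
    def __init__(self, host_ip, vm_host_lookup):
        # vm_host_lookup stands in for vm_util.get_host_name_for_vm.
        super(VcConsoleSketch, self).__init__(host_ip)
        self._lookup = vm_host_lookup

    def get_vnc_console(self, instance):
        console = super(VcConsoleSketch, self).get_vnc_console(instance)
        console['host'] = self._lookup(instance)  # ESX host under vCenter
        return console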
+
class VMwareAPISession(object):
"""
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index cd8302115..abf896c79 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -21,6 +21,7 @@
A fake VMware VI API implementation.
"""
+import collections
import pprint
import uuid
@@ -83,28 +84,72 @@ def _get_objects(obj_type):
return lst_objs
-class Prop(object):
+class Property(object):
"""Property Object base class."""
- def __init__(self):
- self.name = None
- self.val = None
+ def __init__(self, name=None, val=None):
+ self.name = name
+ self.val = val
+
+class ManagedObjectReference(object):
+ """A managed object reference is a remote identifier."""
-class Obj(object):
- def __init__(self, name, value):
+ def __init__(self, value="object-123", _type="ManagedObject"):
+ super(ManagedObjectReference, self).__init__()
+ # A Managed Object Reference value attribute
+ # typically looks like vm-123 or host-232,
+ # not a UUID.
self.value = value
- self._type = name
+ # The _type attribute holds the name of the
+ # vCenter object type that the value attribute
+ # identifies.
+ self._type = _type
+
+
+class ObjectContent(object):
+ """ObjectContent array holds dynamic properties."""
+
+ # This class fakes a class sent back to us over SOAP,
+ # so its attribute names are decided for us by the
+ # API we are faking here.
+ def __init__(self, obj_ref, prop_list=None, missing_list=None):
+ self.obj = obj_ref
+
+ if not isinstance(prop_list, collections.Iterable):
+ prop_list = []
+
+ if not isinstance(missing_list, collections.Iterable):
+ missing_list = []
+
+ # propSet and missingSet keep the exact attribute
+ # names the real API uses, so calling code must
+ # use these names as well.
+ self.propSet = prop_list
+ self.missingSet = missing_list
class ManagedObject(object):
- """Managed Data Object base class."""
+ """Managed Object base class."""
def __init__(self, name="ManagedObject", obj_ref=None, value=None):
"""Sets the obj property which acts as a reference to the object."""
super(ManagedObject, self).__setattr__('objName', name)
+
+ # A managed object is a local representation of a
+ # remote object that you can reference using the
+ # object reference.
if obj_ref is None:
- obj_ref = Obj(name, value)
+ if value is None:
+ value = 'obj-123'
+ obj_ref = ManagedObjectReference(value, name)
+
+ # use object.__setattr__ here because the default
+ # setter is overridden for this class below.
object.__setattr__(self, 'obj', obj_ref)
object.__setattr__(self, 'propSet', [])
@@ -124,16 +169,20 @@ class ManagedObject(object):
return self.__getattr__(attr)
def __setattr__(self, attr, val):
+ # TODO(hartsocks): this adds unnecessary complexity to the class
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
- elem = Prop()
+ elem = Property()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
+ # TODO(hartsocks): remove this
+ # in a real ManagedObject you have to iterate the propSet
+ # in a real ManagedObject, the propSet is a *set* not a list
for elem in self.propSet:
if elem.name == attr:
return elem.val
@@ -215,6 +264,8 @@ class VirtualMachine(ManagedObject):
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("config.hardware.device", kwargs.get("virtual_device", None))
self.set("config.extraConfig", kwargs.get("extra_config", None))
+ self.set('runtime.host',
+ ManagedObjectReference(value='host-123', _type="HostSystem"))
self.device = kwargs.get("virtual_device")
def reconfig(self, factory, val):
@@ -310,8 +361,8 @@ class HostNetworkSystem(ManagedObject):
class HostSystem(ManagedObject):
"""Host System class."""
- def __init__(self):
- super(HostSystem, self).__init__("HostSystem")
+ def __init__(self, obj_ref=None, value='host-123'):
+ super(HostSystem, self).__init__("HostSystem", obj_ref, value)
self.set("name", "ha-host")
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
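A usage sketch for the fakes above: attribute access on a ManagedObject
round-trips through propSet, mimicking how the real API exposes collected
properties:

vm = ManagedObject(name="VirtualMachine", value="vm-123")
vm.powerState = "poweredOn"           # __setattr__ appends a Property
assert vm.propSet[-1].name == "powerState"
assert vm.powerState == "poweredOn"   # __getattr__ scans propSet
assert vm.obj._type == "VirtualMachine"
assert vm.obj.value == "vm-123"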
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index fecac5bcc..d8e063cad 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -20,6 +20,7 @@ The VMware API VM utility module to build SOAP object specs.
"""
import copy
+
from nova import exception
from nova.virt.vmwareapi import vim_util
@@ -50,6 +51,10 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
config_spec.name = instance['uuid']
config_spec.guestId = os_type
+ # Allow nested ESX instances to host 64 bit VMs.
+ if os_type == "vmkernel5Guest":
+ config_spec.nestedHVEnabled = "True"
+
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
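Context for the nestedHVEnabled flag: "vmkernel5Guest" is the guest ID for
a nested ESXi 5.x instance, and nested hardware virtualization exposes the
host's VT-x/AMD-V extensions to that guest so it can in turn run 64-bit
VMs. A sketch of the resulting spec fields, assuming a suds-style factory
as used throughout this module:

config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.guestId = 'vmkernel5Guest'   # nested ESXi guest
config_spec.nestedHVEnabled = "True"     # SOAP-serialized boolean string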
@@ -522,6 +527,111 @@ def get_vm_ref(session, instance):
return vm_ref
+def get_host_ref_from_id(session, host_id, property_list=None):
+ """Get a host reference object for a host_id string."""
+
+ if property_list is None:
+ property_list = ['name']
+
+ host_refs = session._call_method(
+ vim_util, "get_objects",
+ "HostSystem", property_list)
+
+ for ref in host_refs:
+ if ref.obj.value == host_id:
+ return ref
+
+
+def get_host_id_from_vm_ref(session, vm_ref):
+ """
+ Find the managed object ID of the host running a VM.
+ Since vMotion can change this value, do not cache it
+ for long and be prepared for it to change.
+
+ :param session: a vSphere API connection
+ :param vm_ref: a reference object to the running VM
+ :return: the host_id running the virtual machine
+ """
+
+ # to prevent typographical errors below
+ property_name = 'runtime.host'
+
+ # a property collector in the VMware vSphere Management API
+ # is a set of local representations of remote values;
+ # property_set here is a local representation of the
+ # properties we are querying for.
+ property_set = session._call_method(
+ vim_util, "get_object_properties",
+ None, vm_ref, vm_ref._type, [property_name])
+
+ prop = property_from_property_set(
+ property_name, property_set)
+
+ if prop is not None:
+ prop = prop.val.value
+ else:
+ # reaching here represents an impossible state
+ raise RuntimeError(
+ "Virtual Machine %s exists without a runtime.host!"
+ % (vm_ref))
+
+ return prop
+
+
+def property_from_property_set(property_name, property_set):
+ '''
+ Use this method to filter property collector results.
+
+ Because network traffic is expensive, multiple
+ VMwareAPI calls will sometimes pile up properties
+ to be collected into a single query. That means
+ results may contain many values for multiple purposes.
+
+ This helper will filter a list for a single result
+ and filter the properties of that result to find
+ the single value of whatever type resides in that
+ result. This could be a ManagedObjectReference ID
+ or a complex value.
+
+ :param property_name: name of property you want
+ :param property_set: all results from query
+ :return: the value of the property.
+ '''
+
+ for prop in property_set:
+ p = _property_from_propSet(prop.propSet, property_name)
+ if p is not None:
+ return p
+
+
+def _property_from_propSet(propSet, name='name'):
+ for p in propSet:
+ if p.name == name:
+ return p
+
+
+def get_host_ref_for_vm(session, instance, props):
+ """Get a reference to the ESXi host running a VM."""
+
+ vm_ref = get_vm_ref(session, instance)
+ host_id = get_host_id_from_vm_ref(session, vm_ref)
+ return get_host_ref_from_id(session, host_id, props)
+
+
+def get_host_name_for_vm(session, instance):
+ """Get the name of the ESXi host running a VM."""
+ host_ref = get_host_ref_for_vm(session, instance, ['name'])
+ return get_host_name_from_host_ref(host_ref)
+
+
+def get_host_name_from_host_ref(host_ref):
+ p = _property_from_propSet(host_ref.propSet)
+ if p is not None:
+ return p.val
+
+
def get_cluster_ref_from_name(session, cluster_name):
"""Get reference to the cluster with the name specified."""
cls = session._call_method(vim_util, "get_objects",
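How the helpers above compose, sketched with the fake classes from fake.py
(which mirror the shapes the property collector returns):

prop_set = [ObjectContent(
    obj_ref=ManagedObjectReference('vm-123', 'VirtualMachine'),
    prop_list=[Property('runtime.host',
                        ManagedObjectReference('host-123', 'HostSystem'))])]

prop = property_from_property_set('runtime.host', prop_set)
assert prop.val.value == 'host-123'   # what get_host_id_from_vm_ref returns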
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 1afead6b9..e8f63f1d7 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -1089,6 +1089,26 @@ class VMwareVMOps(object):
'port': self._get_vnc_port(vm_ref),
'internal_access_path': None}
+ def get_vnc_console_vcenter(self, instance):
+ """Return connection info for a vnc console using vCenter logic."""
+
+ # vCenter does not run virtual machines and does not run
+ # a VNC proxy. Instead, you need to tell OpenStack to talk
+ # directly to the ESX host running the VM you are attempting
+ # to connect to via VNC.
+
+ vnc_console = self.get_vnc_console(instance)
+ host_name = vm_util.get_host_name_for_vm(
+ self._session,
+ instance)
+ vnc_console['host'] = host_name
+
+ # NOTE: a VM can move between hosts in some situations;
+ # log the current host at debug level for admins.
+ LOG.debug(_("VM %(uuid)s is currently on host %(host_name)s"),
+ {'uuid': instance['uuid'], 'host_name': host_name})
+
+ return vnc_console
+
@staticmethod
def _get_vnc_port(vm_ref):
"""Return VNC port for a VM."""
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ac8c9c58b..6e9f09184 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1142,7 +1142,7 @@ def _fetch_vhd_image(context, session, instance, image_id):
if _image_uses_bittorrent(context, instance):
plugin_name = 'bittorrent'
callback = None
- _add_bittorrent_params(params)
+ _add_bittorrent_params(image_id, params)
else:
plugin_name = 'glance'
callback = _generate_glance_callback(context)
@@ -1180,20 +1180,18 @@ def _generate_glance_callback(context):
return pick_glance
-def _add_bittorrent_params(params):
- params['torrent_base_url'] = CONF.xenapi_torrent_base_url
- params['torrent_seed_duration'] = CONF.xenapi_torrent_seed_duration
- params['torrent_seed_chance'] = CONF.xenapi_torrent_seed_chance
- params['torrent_max_last_accessed'] =\
- CONF.xenapi_torrent_max_last_accessed
- params['torrent_listen_port_start'] =\
- CONF.xenapi_torrent_listen_port_start
- params['torrent_listen_port_end'] =\
- CONF.xenapi_torrent_listen_port_end
- params['torrent_download_stall_cutoff'] =\
- CONF.xenapi_torrent_download_stall_cutoff
- params['torrent_max_seeder_processes_per_host'] =\
- CONF.xenapi_torrent_max_seeder_processes_per_host
+def _add_bittorrent_params(image_id, params):
+ params['torrent_url'] = urlparse.urljoin(CONF.xenapi_torrent_base_url,
+ "%s.torrent" % image_id)
+ params['torrent_seed_duration'] = CONF.xenapi_torrent_seed_duration
+ params['torrent_seed_chance'] = CONF.xenapi_torrent_seed_chance
+ params['torrent_max_last_accessed'] = CONF.xenapi_torrent_max_last_accessed
+ params['torrent_listen_port_start'] = CONF.xenapi_torrent_listen_port_start
+ params['torrent_listen_port_end'] = CONF.xenapi_torrent_listen_port_end
+ params['torrent_download_stall_cutoff'] = \
+ CONF.xenapi_torrent_download_stall_cutoff
+ params['torrent_max_seeder_processes_per_host'] = \
+ CONF.xenapi_torrent_max_seeder_processes_per_host
def _get_vdi_chain_size(session, vdi_uuid):
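One behavioral note on the torrent_url built above: urlparse.urljoin() only
appends the image segment cleanly when the base URL ends in a slash, which
is worth remembering when setting xenapi_torrent_base_url:

import urlparse  # Python 2, as in this module

urlparse.urljoin('http://seed.example.com/torrents/', 'abc.torrent')
# -> 'http://seed.example.com/torrents/abc.torrent'
urlparse.urljoin('http://seed.example.com/torrents', 'abc.torrent')
# -> 'http://seed.example.com/abc.torrent' (last segment replaced)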
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 7a0b4a67e..853bc3262 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -233,8 +233,16 @@ def parse_volume_info(connection_data):
target_host = _get_target_host(target_portal)
target_port = _get_target_port(target_portal)
target_iqn = connection_data['target_iqn']
- LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)',
- (volume_id, target_host, target_port, target_iqn))
+
+ log_params = {
+ "vol_id": volume_id,
+ "host": target_host,
+ "port": target_port,
+ "iqn": target_iqn
+ }
+ LOG.debug(_('(vol_id,host,port,iqn): '
+ '(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)'), log_params)
+
if (volume_id is None or
target_host is None or
target_iqn is None):
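The removed LOG.debug call paired five labels with only four %s placeholders
and passed its values as a single tuple argument; dict-based formatting, as
added above, avoids both failure modes:

log_params = {'vol_id': 'vol-1', 'host': '10.0.0.5', 'port': 3260,
              'iqn': 'iqn.2010-10.org.openstack:volume-1'}
print('(vol_id,host,port,iqn): (%(vol_id)s,%(host)s,%(port)s,%(iqn)s)'
      % log_params)
# -> (vol_id,host,port,iqn): (vol-1,10.0.0.5,3260,iqn.2010-10.org.openstack:volume-1)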