summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2013-02-05 01:27:03 +0000
committerGerrit Code Review <review@openstack.org>2013-02-05 01:27:03 +0000
commit8c94d3eca632ae3bf5be7eee110e23f6bc62abf7 (patch)
tree7edae8209dc74e773f11b69081925d50c37baef6
parent65949390762c41200ef8312ea934ae6bc630a0bd (diff)
parenteaef552a1f0c4161d8b303aa4a0090ece9ab9a4e (diff)
downloadnova-8c94d3eca632ae3bf5be7eee110e23f6bc62abf7.tar.gz
nova-8c94d3eca632ae3bf5be7eee110e23f6bc62abf7.tar.xz
nova-8c94d3eca632ae3bf5be7eee110e23f6bc62abf7.zip
Merge "VMware VC Compute Driver"
-rw-r--r--nova/virt/vmwareapi/__init__.py1
-rw-r--r--nova/virt/vmwareapi/driver.py132
-rw-r--r--nova/virt/vmwareapi/host.py65
-rw-r--r--nova/virt/vmwareapi/network_util.py36
-rw-r--r--nova/virt/vmwareapi/vif.py15
-rw-r--r--nova/virt/vmwareapi/vm_util.py58
-rw-r--r--nova/virt/vmwareapi/vmops.py56
-rw-r--r--nova/virt/vmwareapi/volume_util.py24
-rw-r--r--nova/virt/vmwareapi/volumeops.py20
9 files changed, 296 insertions, 111 deletions
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index 37d816f8c..1b9732b44 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -21,3 +21,4 @@
from nova.virt.vmwareapi import driver
VMwareESXDriver = driver.VMwareESXDriver
+VMwareVCDriver = driver.VMwareVCDriver
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index dadfd5ca8..3e9269530 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -21,9 +21,10 @@ A connection to the VMware ESX platform.
**Related Flags**
-:vmwareapi_host_ip: IP address of VMware ESX server.
-:vmwareapi_host_username: Username for connection to VMware ESX Server.
-:vmwareapi_host_password: Password for connection to VMware ESX Server.
+:vmwareapi_host_ip: IP address or Name of VMware ESX/VC server.
+:vmwareapi_host_username: Username for connection to VMware ESX/VC Server.
+:vmwareapi_host_password: Password for connection to VMware ESX/VC Server.
+:vmwareapi_cluster_name: Name of a VMware Cluster ComputeResource.
:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
remote tasks
(default: 5.0).
@@ -50,6 +51,7 @@ from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
@@ -59,30 +61,37 @@ LOG = logging.getLogger(__name__)
vmwareapi_opts = [
cfg.StrOpt('vmwareapi_host_ip',
default=None,
- help='URL for connection to VMware ESX host. Required if '
- 'compute_driver is vmwareapi.VMwareESXDriver.'),
+ help='URL for connection to VMware ESX/VC host. Required if '
+ 'compute_driver is vmwareapi.VMwareESXDriver or '
+ 'vmwareapi.VMwareVCDriver.'),
cfg.StrOpt('vmwareapi_host_username',
default=None,
- help='Username for connection to VMware ESX host. '
+ help='Username for connection to VMware ESX/VC host. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.'),
cfg.StrOpt('vmwareapi_host_password',
default=None,
- help='Password for connection to VMware ESX host. '
+ help='Password for connection to VMware ESX/VC host. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.',
+ 'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.',
secret=True),
+ cfg.StrOpt('vmwareapi_cluster_name',
+ default=None,
+ help='Name of a VMware Cluster ComputeResource. '
+ 'Used only if compute_driver is '
+ 'vmwareapi.VMwareVCDriver.'),
cfg.FloatOpt('vmwareapi_task_poll_interval',
default=5.0,
help='The interval used for polling of remote tasks. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver or '
+ 'vmwareapi.VMwareVCDriver.'),
cfg.IntOpt('vmwareapi_api_retry_count',
default=10,
help='The number of times we retry on failures, e.g., '
'socket error, etc. '
'Used only if compute_driver is '
- 'vmwareapi.VMwareESXDriver.'),
+ 'vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver.'),
cfg.IntOpt('vnc_port',
default=5900,
help='VNC starting port'),
@@ -128,14 +137,17 @@ class VMwareESXDriver(driver.ComputeDriver):
raise Exception(_("Must specify vmwareapi_host_ip,"
"vmwareapi_host_username "
"and vmwareapi_host_password to use"
- "compute_driver=vmwareapi.VMwareESXDriver"))
+ "compute_driver=vmwareapi.VMwareESXDriver or "
+ "vmwareapi.VMwareVCDriver"))
self._session = VMwareAPISession(self._host_ip,
host_username, host_password,
api_retry_count, scheme=scheme)
- self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self._cluster_name = CONF.vmwareapi_cluster_name
+ self._volumeops = volumeops.VMwareVolumeOps(self._session,
+ self._cluster_name)
self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
- self._volumeops)
+ self._volumeops, self._cluster_name)
self._host = host.Host(self._session)
self._host_state = None
@@ -211,40 +223,6 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Power on the specified instance."""
self._vmops.power_on(instance)
- def migrate_disk_and_power_off(self, context, instance, dest,
- instance_type, network_info,
- block_device_info=None):
- """
- Transfers the disk of a running instance in multiple phases, turning
- off the instance before the end.
- """
- return self._vmops.migrate_disk_and_power_off(context, instance,
- dest, instance_type)
-
- def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM."""
- self._vmops.confirm_migration(migration, instance, network_info)
-
- def finish_revert_migration(self, instance, network_info,
- block_device_info=None):
- """Finish reverting a resize, powering back on the instance."""
- self._vmops.finish_revert_migration(instance)
-
- def finish_migration(self, context, migration, instance, disk_info,
- network_info, image_meta, resize_instance=False,
- block_device_info=None):
- """Completes a resize, turning on the migrated instance."""
- self._vmops.finish_migration(context, migration, instance, disk_info,
- network_info, image_meta, resize_instance)
-
- def live_migration(self, context, instance_ref, dest,
- post_method, recover_method, block_migration=False,
- migrate_data=None):
- """Live migration of an instance to another host."""
- self._vmops.live_migration(context, instance_ref, dest,
- post_method, recover_method,
- block_migration)
-
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
@@ -363,6 +341,64 @@ class VMwareESXDriver(driver.ComputeDriver):
return self._vmops.list_interfaces(instance_name)
+class VMwareVCDriver(VMwareESXDriver):
+    """The VC host connection object."""
+
+ def __init__(self, virtapi, read_only=False, scheme="https"):
+ super(VMwareVCDriver, self).__init__(virtapi)
+ if not self._cluster_name:
+ self._cluster = None
+ else:
+ self._cluster = vm_util.get_cluster_ref_from_name(
+ self._session, self._cluster_name)
+ if self._cluster is None:
+ raise exception.NotFound(_("VMware Cluster %s is not found")
+ % self._cluster_name)
+ self._vc_state = None
+
+ @property
+ def host_state(self):
+ if not self._vc_state:
+ self._vc_state = host.VCState(self._session,
+ self._host_ip,
+ self._cluster)
+ return self._vc_state
+
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type, network_info,
+ block_device_info=None):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ return self._vmops.migrate_disk_and_power_off(context, instance,
+ dest, instance_type)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM."""
+ self._vmops.confirm_migration(migration, instance, network_info)
+
+ def finish_revert_migration(self, instance, network_info,
+ block_device_info=None):
+ """Finish reverting a resize, powering back on the instance."""
+ self._vmops.finish_revert_migration(instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False,
+ block_device_info=None):
+ """Completes a resize, turning on the migrated instance."""
+ self._vmops.finish_migration(context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance)
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method, block_migration=False,
+ migrate_data=None):
+ """Live migration of an instance to another host."""
+ self._vmops.live_migration(context, instance_ref, dest,
+ post_method, recover_method,
+ block_migration)
+
+
class VMwareAPISession(object):
"""
Sets up a session with the ESX host and handles all
diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py
index 09b8f1fe3..9d11901d6 100644
--- a/nova/virt/vmwareapi/host.py
+++ b/nova/virt/vmwareapi/host.py
@@ -138,3 +138,68 @@ class HostState(object):
self._stats = data
return data
+
+
+class VCState(object):
+ """Manages information about the VC host this compute
+ node is running on.
+ """
+ def __init__(self, session, host_name, cluster):
+ super(VCState, self).__init__()
+ self._session = session
+ self._host_name = host_name
+ self._cluster = cluster
+ self._stats = {}
+ self.update_status()
+
+ def get_host_stats(self, refresh=False):
+ """Return the current state of the host. If 'refresh' is
+ True, run the update first.
+ """
+ if refresh:
+ self.update_status()
+ return self._stats
+
+ def update_status(self):
+ """Update the current state of the host.
+ """
+ host_mor = vm_util.get_host_ref(self._session, self._cluster)
+ if host_mor is None:
+ return
+
+ summary = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ host_mor,
+ "HostSystem",
+ "summary")
+
+ if summary is None:
+ return
+
+ try:
+ ds = vm_util.get_datastore_ref_and_name(self._session,
+ self._cluster)
+ except exception.DatastoreNotFound:
+ ds = (None, None, 0, 0)
+
+ data = {}
+ data["vcpus"] = summary.hardware.numCpuThreads
+ data["cpu_info"] =\
+ {"vendor": summary.hardware.vendor,
+ "model": summary.hardware.cpuModel,
+ "topology": {"cores": summary.hardware.numCpuCores,
+ "sockets": summary.hardware.numCpuPkgs,
+ "threads": summary.hardware.numCpuThreads}
+ }
+ data["disk_total"] = ds[2] / (1024 * 1024)
+ data["disk_available"] = ds[3] / (1024 * 1024)
+ data["disk_used"] = data["disk_total"] - data["disk_available"]
+ data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
+ data["host_memory_free"] = data["host_memory_total"] -\
+ summary.quickStats.overallMemoryUsage
+ data["hypervisor_type"] = summary.config.product.name
+ data["hypervisor_version"] = summary.config.product.version
+ data["hypervisor_hostname"] = self._host_name
+
+ self._stats = data
+ return data
diff --git a/nova/virt/vmwareapi/network_util.py b/nova/virt/vmwareapi/network_util.py
index f63d7f723..5a83b0763 100644
--- a/nova/virt/vmwareapi/network_util.py
+++ b/nova/virt/vmwareapi/network_util.py
@@ -29,14 +29,22 @@ from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
-def get_network_with_the_name(session, network_name="vmnet0"):
+def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
"""
Gets reference to the network whose name is passed as the
argument.
"""
- hostsystems = session._call_method(vim_util, "get_objects",
- "HostSystem", ["network"])
- vm_networks_ret = hostsystems[0].propSet[0].val
+ host = vm_util.get_host_ref(session, cluster)
+ if cluster is not None:
+ vm_networks_ret = session._call_method(vim_util,
+ "get_dynamic_property", cluster,
+ "ClusterComputeResource",
+ "network")
+ else:
+ vm_networks_ret = session._call_method(vim_util,
+ "get_dynamic_property", host,
+ "HostSystem", "network")
+
# Meaning there are no networks on the host. suds responds with a ""
# in the parent property field rather than a [] in the
# ManagedObjectReference property field of the parent
@@ -77,14 +85,13 @@ def get_network_with_the_name(session, network_name="vmnet0"):
return None
-def get_vswitch_for_vlan_interface(session, vlan_interface):
+def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
"""
Gets the vswitch associated with the physical network adapter
with the name supplied.
"""
# Get the list of vSwicthes on the Host System
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
vswitches_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.vswitch")
@@ -105,10 +112,9 @@ def get_vswitch_for_vlan_interface(session, vlan_interface):
pass
-def check_if_vlan_interface_exists(session, vlan_interface):
+def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
"""Checks if the vlan_inteface exists on the esx host."""
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
physical_nics_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.pnic")
@@ -122,10 +128,9 @@ def check_if_vlan_interface_exists(session, vlan_interface):
return False
-def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
+def get_vlanid_and_vswitch_for_portgroup(session, pg_name, cluster=None):
"""Get the vlan id and vswicth associated with the port group."""
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
port_grps_on_host_ret = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "config.network.portgroup")
@@ -141,7 +146,7 @@ def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
return p_gp.spec.vlanId, p_grp_vswitch_name
-def create_port_group(session, pg_name, vswitch_name, vlan_id=0):
+def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
"""
Creates a port group on the host system with the vlan tags
supplied. VLAN id 0 means no vlan id association.
@@ -152,8 +157,7 @@ def create_port_group(session, pg_name, vswitch_name, vlan_id=0):
vswitch_name,
pg_name,
vlan_id)
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
network_system_mor = session._call_method(vim_util,
"get_dynamic_property", host_mor,
"HostSystem", "configManager.networkSystem")
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index 5684e6aa6..137045508 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -36,7 +36,7 @@ vmwareapi_vif_opts = [
CONF.register_opts(vmwareapi_vif_opts)
-def ensure_vlan_bridge(self, session, network):
+def ensure_vlan_bridge(self, session, network, cluster=None):
"""Create a vlan and bridge unless they already exist."""
vlan_num = network['vlan']
bridge = network['bridge']
@@ -45,28 +45,31 @@ def ensure_vlan_bridge(self, session, network):
# Check if the vlan_interface physical network adapter exists on the
# host.
if not network_util.check_if_vlan_interface_exists(session,
- vlan_interface):
+ vlan_interface,
+ cluster):
raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = network_util.get_vswitch_for_vlan_interface(
- session, vlan_interface)
+ session, vlan_interface, cluster)
if vswitch_associated is None:
raise exception.SwitchNotFoundForNetworkAdapter(
adapter=vlan_interface)
# Check whether bridge already exists and retrieve the the ref of the
# network whose name_label is "bridge"
- network_ref = network_util.get_network_with_the_name(session, bridge)
+ network_ref = network_util.get_network_with_the_name(session, bridge,
+ cluster)
if network_ref is None:
# Create a port group on the vSwitch associated with the
# vlan_interface corresponding physical network adapter on the ESX
# host.
network_util.create_port_group(session, bridge,
- vswitch_associated, vlan_num)
+ vswitch_associated, vlan_num,
+ cluster)
else:
# Get the vlan id and vswitch corresponding to the port group
_get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup
- pg_vlanid, pg_vswitch = _get_pg_info(session, bridge)
+ pg_vlanid, pg_vswitch = _get_pg_info(session, bridge, cluster)
# Check if the vswitch associated is proper
if pg_vswitch != vswitch_associated:
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index af481b566..c1015cb13 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -480,11 +480,61 @@ def get_vm_ref_from_name(session, vm_name):
return None
-def get_datastore_ref_and_name(session):
+def get_cluster_ref_from_name(session, cluster_name):
+ """Get reference to the cluster with the name specified."""
+ cls = session._call_method(vim_util, "get_objects",
+ "ClusterComputeResource", ["name"])
+ for cluster in cls:
+ if cluster.propSet[0].val == cluster_name:
+ return cluster.obj
+ return None
+
+
+def get_host_ref(session, cluster=None):
+ """Get reference to a host within the cluster specified."""
+ if cluster is None:
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ else:
+ host_ret = session._call_method(vim_util, "get_dynamic_property",
+ cluster, "ClusterComputeResource",
+ "host")
+ if host_ret is None:
+ return
+ if not host_ret.ManagedObjectReference:
+ return
+ host_mor = host_ret.ManagedObjectReference[0]
+
+ return host_mor
+
+
+def get_datastore_ref_and_name(session, cluster=None, host=None):
"""Get the datastore list and choose the first local storage."""
- data_stores = session._call_method(vim_util, "get_objects",
- "Datastore", ["summary.type", "summary.name",
- "summary.capacity", "summary.freeSpace"])
+ if cluster is None and host is None:
+ data_stores = session._call_method(vim_util, "get_objects",
+ "Datastore", ["summary.type", "summary.name",
+ "summary.capacity", "summary.freeSpace"])
+ else:
+ if cluster is not None:
+ datastore_ret = session._call_method(
+ vim_util,
+ "get_dynamic_property", cluster,
+ "ClusterComputeResource", "datastore")
+ else:
+ datastore_ret = session._call_method(
+ vim_util,
+ "get_dynamic_property", host,
+ "HostSystem", "datastore")
+
+ if datastore_ret is None:
+ raise exception.DatastoreNotFound()
+ data_store_mors = datastore_ret.ManagedObjectReference
+ data_stores = session._call_method(vim_util,
+ "get_properties_for_a_collection_of_objects",
+ "Datastore", data_store_mors,
+ ["summary.type", "summary.name",
+ "summary.capacity", "summary.freeSpace"])
+
for elem in data_stores:
ds_name = None
ds_type = None
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 708a29fad..106de0cb0 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -73,12 +73,17 @@ RESIZE_TOTAL_STEPS = 4
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
- def __init__(self, session, virtapi, volumeops):
+ def __init__(self, session, virtapi, volumeops, cluster_name=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
+ if not cluster_name:
+ self._cluster = None
+ else:
+ self._cluster = vm_util.get_cluster_ref_from_name(
+ self._session, cluster_name)
self._instance_path_base = VMWARE_PREFIX + CONF.base_dir_name
self._default_root_device = 'vda'
self._rescue_suffix = '-rescue'
@@ -133,7 +138,7 @@ class VMwareVMOps(object):
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
- ds = vm_util.get_datastore_ref_and_name(self._session)
+ ds = vm_util.get_datastore_ref_and_name(self._session, self._cluster)
data_store_ref = ds[0]
data_store_name = ds[1]
@@ -157,11 +162,12 @@ class VMwareVMOps(object):
(vmdk_file_size_in_kb, os_type, adapter_type,
disk_type) = _get_image_properties()
- vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
+ vm_folder_ref = self._get_vmfolder_ref()
+ res_pool_ref = self._get_res_pool_ref()
def _check_if_network_bridge_exists(network_name):
network_ref = network_util.get_network_with_the_name(
- self._session, network_name)
+ self._session, network_name, self._cluster)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=network_name)
return network_ref
@@ -176,7 +182,8 @@ class VMwareVMOps(object):
CONF.vmware.integration_bridge
if mapping.get('should_create_vlan'):
network_ref = vmwarevif.ensure_vlan_bridge(
- self._session, network)
+ self._session, network,
+ self._cluster)
else:
network_ref = _check_if_network_bridge_exists(network_name)
vif_infos.append({'network_name': network_name,
@@ -486,7 +493,7 @@ class VMwareVMOps(object):
vm_ref,
"VirtualMachine",
"datastore")
- if not ds_ref_ret:
+ if ds_ref_ret is None:
raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
@@ -649,8 +656,7 @@ class VMwareVMOps(object):
LOG.debug(_("Destroyed the VM"), instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:delete, got this exception"
- " while destroying the VM: %s") % str(excep),
- instance=instance)
+ " while destroying the VM: %s") % str(excep))
if network_info:
self.unplug_vifs(instance, network_info)
@@ -702,8 +708,7 @@ class VMwareVMOps(object):
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
- " while un-registering the VM: %s") % str(excep),
- instance=instance)
+ " while un-registering the VM: %s") % str(excep))
if network_info:
self.unplug_vifs(instance, network_info)
@@ -735,8 +740,7 @@ class VMwareVMOps(object):
LOG.warn(_("In vmwareapi:vmops:destroy, "
"got this exception while deleting"
" the VM contents from the disk: %s")
- % str(excep),
- instance=instance)
+ % str(excep))
except Exception, exc:
LOG.exception(exc, instance=instance)
@@ -936,11 +940,12 @@ class VMwareVMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
# Get the clone vm spec
- ds_ref = vm_util.get_datastore_ref_and_name(self._session)[0]
+ ds_ref = vm_util.get_datastore_ref_and_name(
+ self._session, None, dest)[0]
client_factory = self._session._get_vim().client.factory
rel_spec = vm_util.relocate_vm_spec(client_factory, ds_ref, host_ref)
clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec)
- vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
+ vm_folder_ref = self._get_vmfolder_ref()
# 3. Clone VM on ESX host
LOG.debug(_("Cloning VM to host %s") % dest, instance=instance)
@@ -1203,18 +1208,27 @@ class VMwareVMOps(object):
return host.obj
return None
- def _get_vmfolder_and_res_pool_refs(self):
+ def _get_vmfolder_ref(self):
"""Get the Vm folder ref from the datacenter."""
dc_objs = self._session._call_method(vim_util, "get_objects",
- "Datacenter", ["vmFolder"])
+ "Datacenter", ["vmFolder"])
# There is only one default datacenter in a standalone ESX host
vm_folder_ref = dc_objs[0].propSet[0].val
+ return vm_folder_ref
+ def _get_res_pool_ref(self):
# Get the resource pool. Taking the first resource pool coming our
# way. Assuming that is the default resource pool.
- res_pool_ref = self._session._call_method(vim_util, "get_objects",
- "ResourcePool")[0].obj
- return vm_folder_ref, res_pool_ref
+ if self._cluster is None:
+ res_pool_ref = self._session._call_method(vim_util, "get_objects",
+ "ResourcePool")[0].obj
+ else:
+ res_pool_ref = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ self._cluster,
+ "ClusterComputeResource",
+ "resourcePool")
+ return res_pool_ref
def _path_exists(self, ds_browser, ds_path):
"""Check if the path exists on the datastore."""
@@ -1269,9 +1283,11 @@ class VMwareVMOps(object):
DataStore.
"""
LOG.debug(_("Creating directory with path %s") % ds_path)
+ dc_ref = self._get_datacenter_ref_and_name()[0]
self._session._call_method(self._session._get_vim(), "MakeDirectory",
self._session._get_vim().get_service_content().fileManager,
- name=ds_path, createParentDirectories=False)
+ name=ds_path, datacenter=dc_ref,
+ createParentDirectories=False)
LOG.debug(_("Created directory with path %s") % ds_path)
def _check_if_folder_file_exists(self, ds_ref, ds_name,
diff --git a/nova/virt/vmwareapi/volume_util.py b/nova/virt/vmwareapi/volume_util.py
index 2af3381a4..ae9a30e37 100644
--- a/nova/virt/vmwareapi/volume_util.py
+++ b/nova/virt/vmwareapi/volume_util.py
@@ -24,6 +24,7 @@ import string
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
@@ -35,33 +36,33 @@ class StorageError(Exception):
super(StorageError, self).__init__(message)
-def get_host_iqn(session):
+def get_host_iqn(session, cluster=None):
"""
Return the host iSCSI IQN.
"""
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
hbas_ret = session._call_method(vim_util, "get_dynamic_property",
host_mor, "HostSystem",
"config.storageDevice.hostBusAdapter")
# Meaning there are no host bus adapters on the host
- if not hbas_ret:
+ if hbas_ret is None:
return
host_hbas = hbas_ret.HostHostBusAdapter
+ if not host_hbas:
+ return
for hba in host_hbas:
if hba.__class__.__name__ == 'HostInternetScsiHba':
return hba.iScsiName
-def find_st(session, data):
+def find_st(session, data, cluster=None):
"""
Return the iSCSI Target given a volume info.
"""
target_portal = data['target_portal']
target_iqn = data['target_iqn']
- host_mor = session._call_method(vim_util, "get_objects",
- "HostSystem")[0].obj
+ host_mor = vm_util.get_host_ref(session, cluster)
lst_properties = ["config.storageDevice.hostBusAdapter",
"config.storageDevice.scsiTopology",
@@ -133,13 +134,14 @@ def find_st(session, data):
return result
-def rescan_iscsi_hba(session):
+def rescan_iscsi_hba(session, cluster=None):
"""
Rescan the iSCSI HBA to discover iSCSI targets.
"""
- # There is only one default storage system in a standalone ESX host
- storage_system_mor = session._call_method(vim_util, "get_objects",
- "HostSystem", ["configManager.storageSystem"])[0].propSet[0].val
+ host_mor = vm_util.get_host_ref(session, cluster)
+ storage_system_mor = session._call_method(vim_util, "get_dynamic_property",
+ host_mor, "HostSystem",
+ "configManager.storageSystem")
hbas_ret = session._call_method(vim_util,
"get_dynamic_property",
storage_system_mor,
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index 922d2135b..855106e4a 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -35,8 +35,13 @@ class VMwareVolumeOps(object):
Management class for Volume-related tasks
"""
- def __init__(self, session):
+ def __init__(self, session, cluster_name=None):
self._session = session
+ if not cluster_name:
+ self._cluster = None
+ else:
+ self._cluster = vm_util.get_cluster_ref_from_name(
+ self._session, cluster_name)
def attach_disk_to_vm(self, vm_ref, instance_name,
adapter_type, disk_type, vmdk_path=None,
@@ -88,14 +93,16 @@ class VMwareVolumeOps(object):
target_iqn = data['target_iqn']
LOG.debug(_("Discovering iSCSI target %(target_iqn)s from "
"%(target_portal)s.") % locals())
- device_name, uuid = volume_util.find_st(self._session, data)
+ device_name, uuid = volume_util.find_st(self._session, data,
+ self._cluster)
if device_name:
LOG.debug(_("Storage target found. No need to discover"))
return (device_name, uuid)
# Rescan iSCSI HBA
- volume_util.rescan_iscsi_hba(self._session)
+ volume_util.rescan_iscsi_hba(self._session, self._cluster)
# Find iSCSI Target again
- device_name, uuid = volume_util.find_st(self._session, data)
+ device_name, uuid = volume_util.find_st(self._session, data,
+ self._cluster)
if device_name:
LOG.debug(_("Discovered iSCSI target %(target_iqn)s from "
"%(target_portal)s.") % locals())
@@ -106,7 +113,7 @@ class VMwareVolumeOps(object):
def get_volume_connector(self, instance):
"""Return volume connector information."""
- iqn = volume_util.get_host_iqn(self._session)
+ iqn = volume_util.get_host_iqn(self._session, self._cluster)
return {
'ip': CONF.vmwareapi_host_ip,
'initiator': iqn,
@@ -167,7 +174,8 @@ class VMwareVolumeOps(object):
data = connection_info['data']
# Discover iSCSI Target
- device_name, uuid = volume_util.find_st(self._session, data)
+ device_name, uuid = volume_util.find_st(self._session, data,
+ self._cluster)
if device_name is None:
raise volume_util.StorageError(_("Unable to find iSCSI Target"))