summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--nova/exception.py10
-rw-r--r--nova/tests/test_vmwareapi.py48
-rw-r--r--nova/virt/vmwareapi/driver.py101
-rw-r--r--nova/virt/vmwareapi/read_write_util.py2
-rw-r--r--nova/virt/vmwareapi/vif.py2
-rw-r--r--nova/virt/vmwareapi/vm_util.py46
-rw-r--r--nova/virt/vmwareapi/vmops.py783
-rw-r--r--nova/virt/vmwareapi/vmware_images.py14
-rw-r--r--nova/virt/vmwareapi/volumeops.py4
9 files changed, 829 insertions, 181 deletions
diff --git a/nova/exception.py b/nova/exception.py
index c15fc1e43..a9a565393 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -317,7 +317,15 @@ class InstanceSuspendFailure(Invalid):
class InstanceResumeFailure(Invalid):
- message = _("Failed to resume server") + ": %(reason)s."
+ message = _("Failed to resume instance: %(reason)s.")
+
+
+class InstancePowerOnFailure(Invalid):
+ message = _("Failed to power on instance: %(reason)s.")
+
+
+class InstancePowerOffFailure(Invalid):
+ message = _("Failed to power off instance: %(reason)s.")
class InstanceRebootFailure(Invalid):
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index f99f1abe0..34f03a555 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -41,7 +42,9 @@ class VMwareAPIVMTestCase(test.TestCase):
self.context = context.RequestContext('fake', 'fake', is_admin=False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
- vmwareapi_host_password='test_pass')
+ vmwareapi_host_password='test_pass',
+ vnc_enabled=False,
+ use_linked_clone=False)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
@@ -211,7 +214,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
- self._check_vm_info(info, power_state.PAUSED)
+ self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.instance, self.network_info, 'SOFT')
@@ -221,7 +224,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
- self._check_vm_info(info, power_state.PAUSED)
+ self._check_vm_info(info, power_state.SUSPENDED)
def test_suspend_non_existent(self):
self._create_instance_in_the_db()
@@ -234,7 +237,7 @@ class VMwareAPIVMTestCase(test.TestCase):
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.instance)
info = self.conn.get_info({'name': 1})
- self._check_vm_info(info, power_state.PAUSED)
+ self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.instance, self.network_info)
info = self.conn.get_info({'name': 1})
self._check_vm_info(info, power_state.RUNNING)
@@ -251,6 +254,43 @@ class VMwareAPIVMTestCase(test.TestCase):
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.instance, self.network_info)
+ def test_power_on(self):
+ self._create_vm()
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+ self.conn.power_on(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_power_on_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
+ self.instance)
+
+ def test_power_off(self):
+ self._create_vm()
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ def test_power_off_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
+ self.instance)
+
+ def test_power_off_suspended(self):
+ self._create_vm()
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'name': 1})
+ self._check_vm_info(info, power_state.SUSPENDED)
+ self.assertRaises(exception.InstancePowerOffFailure,
+ self.conn.power_off, self.instance)
+
def test_get_info(self):
self._create_vm()
info = self.conn.get_info({'name': 1})
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 855f55345..67822f2c9 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -20,16 +21,19 @@ A connection to the VMware ESX platform.
**Related Flags**
-:vmwareapi_host_ip: IPAddress of VMware ESX server.
-:vmwareapi_host_username: Username for connection to VMware ESX Server.
-:vmwareapi_host_password: Password for connection to VMware ESX Server.
-:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
- remote tasks
- (default: 1.0).
-:vmwareapi_api_retry_count: The API retry count in case of failure such as
- network failures (socket errors etc.)
- (default: 10).
-
+:vmwareapi_host_ip: IP address of VMware ESX server.
+:vmwareapi_host_username: Username for connection to VMware ESX Server.
+:vmwareapi_host_password: Password for connection to VMware ESX Server.
+:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks
+ (default: 5.0).
+:vmwareapi_api_retry_count: The API retry count in case of failure such as
+ network failures (socket errors etc.)
+ (default: 10).
+:vnc_port: VNC starting port (default: 5900)
+:vnc_port_total: Total number of VNC ports (default: 10000)
+:vnc_password: VNC password
+:use_linked_clone: Whether to use linked clone (default: True)
"""
import time
@@ -78,6 +82,18 @@ vmwareapi_opts = [
'socket error, etc. '
'Used only if compute_driver is '
'vmwareapi.VMwareESXDriver.'),
+ cfg.IntOpt('vnc_port',
+ default=5900,
+ help='VNC starting port'),
+ cfg.IntOpt('vnc_port_total',
+ default=10000,
+ help='Total number of VNC ports'),
+ cfg.StrOpt('vnc_password',
+ default=None,
+ help='VNC password'),
+ cfg.BoolOpt('use_linked_clone',
+ default=True,
+ help='Whether to use linked clone'),
]
CONF = cfg.CONF
@@ -116,7 +132,8 @@ class VMwareESXDriver(driver.ComputeDriver):
host_username, host_password,
api_retry_count, scheme=scheme)
self._volumeops = volumeops.VMwareVolumeOps(self._session)
- self._vmops = vmops.VMwareVMOps(self._session)
+ self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
+ self._volumeops)
self._host = host.Host(self._session)
self._host_state = None
@@ -142,7 +159,8 @@ class VMwareESXDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
- self._vmops.spawn(context, instance, image_meta, network_info)
+ self._vmops.spawn(context, instance, image_meta, network_info,
+ block_device_info)
def snapshot(self, context, instance, name, update_task_state):
"""Create snapshot from a running VM instance."""
@@ -174,6 +192,61 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Resume the suspended VM instance."""
self._vmops.resume(instance)
+ def rescue(self, context, instance, network_info, image_meta,
+ rescue_password):
+ """Rescue the specified instance."""
+ self._vmops.rescue(context, instance, network_info, image_meta)
+
+ def unrescue(self, instance, network_info):
+ """Unrescue the specified instance."""
+ self._vmops.unrescue(instance)
+
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ self._vmops.power_off(instance)
+
+ def power_on(self, instance):
+ """Power on the specified instance."""
+ self._vmops.power_on(instance)
+
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type, network_info,
+ block_device_info=None):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ return self._vmops.migrate_disk_and_power_off(context, instance,
+ dest, instance_type)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM."""
+ self._vmops.confirm_migration(migration, instance, network_info)
+
+ def finish_revert_migration(self, instance, network_info,
+ block_device_info=None):
+ """Finish reverting a resize, powering back on the instance."""
+ self._vmops.finish_revert_migration(instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False,
+ block_device_info=None):
+ """Completes a resize, turning on the migrated instance."""
+ self._vmops.finish_migration(context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance)
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method, block_migration=False,
+ migrate_data=None):
+ """Live migration of an instance to another host."""
+ self._vmops.live_migration(context, instance_ref, dest,
+ post_method, recover_method,
+ block_migration)
+
+ def poll_rebooting_instances(self, timeout, instances):
+ """Poll for rebooting instances."""
+ self._vmops.poll_rebooting_instances(timeout, instances)
+
def get_info(self, instance):
"""Return info about the VM instance."""
return self._vmops.get_info(instance)
@@ -186,6 +259,10 @@ class VMwareESXDriver(driver.ComputeDriver):
"""Return snapshot of console."""
return self._vmops.get_console_output(instance)
+ def get_vnc_console(self, instance):
+ """Return link to instance's VNC console."""
+ return self._vmops.get_vnc_console(instance)
+
def get_volume_connector(self, instance):
"""Return volume connector information."""
return self._volumeops.get_volume_connector(instance)
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
index 39ea8e2e8..5dcdc6fdb 100644
--- a/nova/virt/vmwareapi/read_write_util.py
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -140,7 +140,7 @@ class VMwareHTTPWriteFile(VMwareHTTPFile):
self.conn.getresponse()
except Exception, excep:
LOG.debug(_("Exception during HTTP connection close in "
- "VMwareHTTpWrite. Exception is %s") % excep)
+ "VMwareHTTPWrite. Exception is %s") % excep)
super(VMwareHTTPWriteFile, self).close()
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index c5b524186..5684e6aa6 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -45,7 +45,7 @@ def ensure_vlan_bridge(self, session, network):
# Check if the vlan_interface physical network adapter exists on the
# host.
if not network_util.check_if_vlan_interface_exists(session,
- vlan_interface):
+ vlan_interface):
raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index c80754327..af481b566 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -361,6 +361,27 @@ def delete_virtual_disk_spec(client_factory, device):
return virtual_device_config
+def clone_vm_spec(client_factory, location,
+ power_on=False, snapshot=None, template=False):
+ """Builds the VM clone spec."""
+ clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
+ clone_spec.location = location
+ clone_spec.powerOn = power_on
+ clone_spec.snapshot = snapshot
+ clone_spec.template = template
+ return clone_spec
+
+
+def relocate_vm_spec(client_factory, datastore=None, host=None,
+ disk_move_type="moveAllDiskBackingsAndAllowSharing"):
+ """Builds the VM relocation spec."""
+ rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
+ rel_spec.datastore = datastore
+ rel_spec.diskMoveType = disk_move_type
+ rel_spec.host = host
+ return rel_spec
+
+
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
@@ -424,6 +445,31 @@ def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
return vswitch_port_group_spec
+def get_vnc_config_spec(client_factory, port, password):
+ """Builds the vnc config spec."""
+ virtual_machine_config_spec = client_factory.create(
+ 'ns0:VirtualMachineConfigSpec')
+
+ opt_enabled = client_factory.create('ns0:OptionValue')
+ opt_enabled.key = "RemoteDisplay.vnc.enabled"
+ opt_enabled.value = "true"
+ opt_port = client_factory.create('ns0:OptionValue')
+ opt_port.key = "RemoteDisplay.vnc.port"
+ opt_port.value = port
+ opt_pass = client_factory.create('ns0:OptionValue')
+ opt_pass.key = "RemoteDisplay.vnc.password"
+ opt_pass.value = password
+ virtual_machine_config_spec.extraConfig = [opt_enabled, opt_port, opt_pass]
+ return virtual_machine_config_spec
+
+
+def search_datastore_spec(client_factory, file_name):
+ """Builds the datastore search spec."""
+ search_spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec')
+ search_spec.matchPattern = [file_name]
+ return search_spec
+
+
def get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 187fc449d..5ea9f105c 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -27,12 +27,18 @@ import urllib
import urllib2
import uuid
+from nova import block_device
+from nova.compute import api as compute
from nova.compute import power_state
from nova.compute import task_states
+from nova import context as nova_context
from nova import exception
from nova.openstack.common import cfg
+from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+from nova.virt import driver
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
@@ -52,21 +58,33 @@ vmware_group = cfg.OptGroup(name='vmware',
CONF = cfg.CONF
CONF.register_group(vmware_group)
CONF.register_opts(vmware_vif_opts, vmware_group)
+CONF.import_opt('vnc_enabled', 'nova.vnc')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
- 'suspended': power_state.PAUSED}
+ 'suspended': power_state.SUSPENDED}
+VMWARE_PREFIX = 'vmware'
+
+
+RESIZE_TOTAL_STEPS = 4
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
- def __init__(self, session):
+ def __init__(self, session, virtapi, volumeops):
"""Initializer."""
+ self.compute_api = compute.API()
self._session = session
+ self._virtapi = virtapi
+ self._volumeops = volumeops
+ self._instance_path_base = VMWARE_PREFIX + CONF.base_dir_name
+ self._default_root_device = 'vda'
+ self._rescue_suffix = '-rescue'
+ self._poll_rescue_last_ran = None
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
@@ -83,13 +101,14 @@ class VMwareVMOps(object):
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
- # Ignoring the oprhaned or inaccessible VMs
+ # Ignoring the orphaned or inaccessible VMs
if conn_state not in ["orphaned", "inaccessible"]:
lst_vm_names.append(vm_name)
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, context, instance, image_meta, network_info):
+ def spawn(self, context, instance, image_meta, network_info,
+ block_device_info=None):
"""
Creates a VM instance.
@@ -97,44 +116,28 @@ class VMwareVMOps(object):
1. Create a VM with no disk and the specifics in the instance object
like RAM size.
- 2. Create a dummy vmdk of the size of the disk file that is to be
- uploaded. This is required just to create the metadata file.
- 3. Delete the -flat.vmdk file created in the above step and retain
- the metadata .vmdk file.
- 4. Upload the disk file.
- 5. Attach the disk to the VM by reconfiguring the same.
- 6. Power on the VM.
+ 2. For flat disk
+ 2.1. Create a dummy vmdk of the size of the disk file that is to be
+ uploaded. This is required just to create the metadata file.
+ 2.2. Delete the -flat.vmdk file created in the above step and retain
+ the metadata .vmdk file.
+ 2.3. Upload the disk file.
+ 3. For sparse disk
+ 3.1. Upload the disk file to a -sparse.vmdk file.
+ 3.2. Copy/Clone the -sparse.vmdk file to a thin vmdk.
+ 3.3. Delete the -sparse.vmdk file.
+ 4. Attach the disk to the VM by reconfiguring the same.
+ 5. Power on the VM.
"""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref:
- raise exception.InstanceExists(name=instance.name)
+ raise exception.InstanceExists(name=instance['name'])
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
-
- def _get_datastore_ref():
- """Get the datastore list and choose the first local storage."""
- data_stores = self._session._call_method(vim_util, "get_objects",
- "Datastore", ["summary.type", "summary.name"])
- for elem in data_stores:
- ds_name = None
- ds_type = None
- for prop in elem.propSet:
- if prop.name == "summary.type":
- ds_type = prop.val
- elif prop.name == "summary.name":
- ds_name = prop.val
- # Local storage identifier
- if ds_type == "VMFS":
- data_store_name = ds_name
- return data_store_name
-
- if data_store_name is None:
- msg = _("Couldn't get a local Datastore reference")
- LOG.error(msg, instance=instance)
- raise exception.NovaException(msg)
-
- data_store_name = _get_datastore_ref()
+ ds = vm_util.get_datastore_ref_and_name(self._session)
+ data_store_ref = ds[0]
+ data_store_name = ds[1]
def _get_image_properties():
"""
@@ -142,31 +145,21 @@ class VMwareVMOps(object):
repository.
"""
_image_info = vmware_images.get_vmdk_size_and_properties(context,
- instance.image_ref,
- instance)
+ instance['image_ref'],
+ instance)
image_size, image_properties = _image_info
vmdk_file_size_in_kb = int(image_size) / 1024
os_type = image_properties.get("vmware_ostype", "otherGuest")
adapter_type = image_properties.get("vmware_adaptertype",
"lsiLogic")
- return vmdk_file_size_in_kb, os_type, adapter_type
-
- vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()
+ disk_type = image_properties.get("vmware_disktype",
+ "preallocated")
+ return vmdk_file_size_in_kb, os_type, adapter_type, disk_type
- def _get_vmfolder_and_res_pool_mors():
- """Get the Vm folder ref from the datacenter."""
- dc_objs = self._session._call_method(vim_util, "get_objects",
- "Datacenter", ["vmFolder"])
- # There is only one default datacenter in a standalone ESX host
- vm_folder_mor = dc_objs[0].propSet[0].val
+ (vmdk_file_size_in_kb, os_type, adapter_type,
+ disk_type) = _get_image_properties()
- # Get the resource pool. Taking the first resource pool coming our
- # way. Assuming that is the default resource pool.
- res_pool_mor = self._session._call_method(vim_util, "get_objects",
- "ResourcePool")[0].obj
- return vm_folder_mor, res_pool_mor
-
- vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
+ vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
def _check_if_network_bridge_exists(network_name):
network_ref = network_util.get_network_with_the_name(
@@ -177,6 +170,8 @@ class VMwareVMOps(object):
def _get_vif_infos():
vif_infos = []
+ if network_info is None:
+ return vif_infos
for (network, mapping) in network_info:
mac_address = mapping['mac']
network_name = network['bridge'] or \
@@ -201,33 +196,29 @@ class VMwareVMOps(object):
def _execute_create_vm():
"""Create VM on ESX host."""
- LOG.debug(_("Creating VM on the ESX host"), instance=instance)
+ LOG.debug(_("Creating VM on the ESX host"), instance=instance)
# Create the VM on the ESX host
vm_create_task = self._session._call_method(
self._session._get_vim(),
- "CreateVM_Task", vm_folder_mor,
- config=config_spec, pool=res_pool_mor)
+ "CreateVM_Task", vm_folder_ref,
+ config=config_spec, pool=res_pool_ref)
self._session._wait_for_task(instance['uuid'], vm_create_task)
- LOG.debug(_("Created VM on the ESX host"), instance=instance)
+ LOG.debug(_("Created VM on the ESX host"), instance=instance)
_execute_create_vm()
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
- # Naming the VM files in correspondence with the VM instance name
- # The flat vmdk file name
- flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
- instance.name)
- # The vmdk meta-data file
- uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
- flat_uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
- flat_uploaded_vmdk_name)
- uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
- uploaded_vmdk_name)
+ # Set the vnc configuration of the instance, vnc port starts from 5900
+ if CONF.vnc_enabled:
+ vnc_port = self._get_vnc_port(vm_ref)
+ vnc_pass = CONF.vnc_password or ''
+ self._set_vnc_config(client_factory, instance, vnc_port, vnc_pass)
def _create_virtual_disk():
"""Create a virtual disk of the size of flat vmdk file."""
@@ -238,103 +229,186 @@ class VMwareVMOps(object):
# Here we assume thick provisioning and lsiLogic for the adapter
# type
LOG.debug(_("Creating Virtual Disk of size "
- "%(vmdk_file_size_in_kb)s KB and adapter type "
- "%(adapter_type)s on the ESX host local store"
- " %(data_store_name)s") %
+ "%(vmdk_file_size_in_kb)s KB and adapter type "
+ "%(adapter_type)s on the ESX host local store "
+ "%(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
"adapter_type": adapter_type,
"data_store_name": data_store_name},
instance=instance)
vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
- vmdk_file_size_in_kb, adapter_type)
+ vmdk_file_size_in_kb, adapter_type,
+ disk_type)
vmdk_create_task = self._session._call_method(
self._session._get_vim(),
"CreateVirtualDisk_Task",
service_content.virtualDiskManager,
name=uploaded_vmdk_path,
- datacenter=self._get_datacenter_name_and_ref()[0],
+ datacenter=dc_ref,
spec=vmdk_create_spec)
self._session._wait_for_task(instance['uuid'], vmdk_create_task)
LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
- " KB on the ESX host local store "
- "%(data_store_name)s") %
+ " KB and type %(disk_type)s on "
+ "the ESX host local store %(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "disk_type": disk_type,
"data_store_name": data_store_name},
instance=instance)
- _create_virtual_disk()
-
- def _delete_disk_file():
- LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
+ def _delete_disk_file(vmdk_path):
+ LOG.debug(_("Deleting the file %(vmdk_path)s "
"on the ESX host local"
"store %(data_store_name)s") %
- {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ {"vmdk_path": vmdk_path,
"data_store_name": data_store_name},
instance=instance)
- # Delete the -flat.vmdk file created. .vmdk file is retained.
+ # Delete the vmdk file.
vmdk_delete_task = self._session._call_method(
self._session._get_vim(),
"DeleteDatastoreFile_Task",
service_content.fileManager,
- name=flat_uploaded_vmdk_path)
+ name=vmdk_path,
+ datacenter=dc_ref)
self._session._wait_for_task(instance['uuid'], vmdk_delete_task)
- LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
+ LOG.debug(_("Deleted the file %(vmdk_path)s on the "
"ESX host local store %(data_store_name)s") %
- {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ {"vmdk_path": vmdk_path,
"data_store_name": data_store_name},
instance=instance)
- _delete_disk_file()
-
- cookies = self._session._get_vim().client.options.transport.cookiejar
-
def _fetch_image_on_esx_datastore():
"""Fetch image from Glance to ESX datastore."""
LOG.debug(_("Downloading image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- {'image_ref': instance.image_ref,
+ {'image_ref': instance['image_ref'],
'data_store_name': data_store_name},
instance=instance)
- # Upload the -flat.vmdk file whose meta-data file we just created
- # above
+ # For flat disk, upload the -flat.vmdk file whose meta-data file
+ # we just created above
+ # For sparse disk, upload the -sparse.vmdk file to be copied into
+ # a flat vmdk
+ upload_vmdk_name = sparse_uploaded_vmdk_name \
+ if disk_type == "sparse" else flat_uploaded_vmdk_name
vmware_images.fetch_image(
context,
- instance.image_ref,
+ instance['image_ref'],
instance,
host=self._session._host_ip,
- data_center_name=self._get_datacenter_name_and_ref()[1],
+ data_center_name=self._get_datacenter_ref_and_name()[1],
datastore_name=data_store_name,
cookies=cookies,
- file_path=flat_uploaded_vmdk_name)
- LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX "
- "data store %(data_store_name)s") %
- {'image_ref': instance.image_ref,
+ file_path=upload_vmdk_name)
+ LOG.debug(_("Downloaded image file data %(image_ref)s to "
+ "%(upload_vmdk_name)s on the ESX data store "
+ "%(data_store_name)s") %
+ {'image_ref': instance['image_ref'],
+ 'upload_vmdk_name': upload_vmdk_name,
'data_store_name': data_store_name},
instance=instance)
- _fetch_image_on_esx_datastore()
-
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
- def _attach_vmdk_to_the_vm():
- """
- Attach the vmdk uploaded to the VM. VM reconfigure is done
- to do so.
- """
- vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
- client_factory,
- adapter_type, "preallocated",
- uploaded_vmdk_path, vmdk_file_size_in_kb)
- LOG.debug(_("Reconfiguring VM instance to attach the image disk"),
- instance=instance)
- reconfig_task = self._session._call_method(
- self._session._get_vim(),
- "ReconfigVM_Task", vm_ref,
- spec=vmdk_attach_config_spec)
- self._session._wait_for_task(instance['uuid'], reconfig_task)
- LOG.debug(_("Reconfigured VM instance to attach the image disk"),
+ def _copy_virtual_disk():
+ """Copy a sparse virtual disk to a thin virtual disk."""
+ # Copy a sparse virtual disk to a thin virtual disk. This is also
+ # done to generate the meta-data file whose specifics
+ # depend on the size of the disk, thin/thick provisioning and the
+ # storage adapter type.
+ LOG.debug(_("Copying Virtual Disk of size "
+ "%(vmdk_file_size_in_kb)s KB and adapter type "
+ "%(adapter_type)s on the ESX host local store "
+ "%(data_store_name)s to disk type %(disk_type)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "adapter_type": adapter_type,
+ "data_store_name": data_store_name,
+ "disk_type": disk_type},
instance=instance)
+ vmdk_copy_spec = vm_util.get_vmdk_create_spec(client_factory,
+ vmdk_file_size_in_kb, adapter_type,
+ disk_type)
+ vmdk_copy_task = self._session._call_method(
+ self._session._get_vim(),
+ "CopyVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ sourceName=sparse_uploaded_vmdk_path,
+ sourceDatacenter=self._get_datacenter_ref_and_name()[0],
+ destName=uploaded_vmdk_path,
+ destSpec=vmdk_copy_spec)
+ self._session._wait_for_task(instance['uuid'], vmdk_copy_task)
+ LOG.debug(_("Copied Virtual Disk of size %(vmdk_file_size_in_kb)s"
+ " KB and type %(disk_type)s on "
+ "the ESX host local store %(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "disk_type": disk_type,
+ "data_store_name": data_store_name},
+ instance=instance)
+
+ ebs_root = self._volume_in_mapping(self._default_root_device,
+ block_device_info)
- _attach_vmdk_to_the_vm()
+ if not ebs_root:
+ linked_clone = CONF.use_linked_clone
+ if linked_clone:
+ upload_folder = self._instance_path_base
+ upload_name = instance['image_ref']
+ else:
+ upload_folder = instance['name']
+ upload_name = instance['name']
+
+ # The vmdk meta-data file
+ uploaded_vmdk_name = "%s/%s.vmdk" % (upload_folder, upload_name)
+ uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ uploaded_vmdk_name)
+
+ if not (linked_clone and self._check_if_folder_file_exists(
+ data_store_ref, data_store_name,
+ upload_folder, upload_name + ".vmdk")):
+
+ # Naming the VM files in correspondence with the VM instance
+ # The flat vmdk file name
+ flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (
+ upload_folder, upload_name)
+ # The sparse vmdk file name for sparse disk image
+ sparse_uploaded_vmdk_name = "%s/%s-sparse.vmdk" % (
+ upload_folder, upload_name)
+
+ flat_uploaded_vmdk_path = vm_util.build_datastore_path(
+ data_store_name,
+ flat_uploaded_vmdk_name)
+ sparse_uploaded_vmdk_path = vm_util.build_datastore_path(
+ data_store_name,
+ sparse_uploaded_vmdk_name)
+ dc_ref = self._get_datacenter_ref_and_name()[0]
+
+ if disk_type != "sparse":
+ # Create a flat virtual disk and retain the metadata file.
+ _create_virtual_disk()
+ _delete_disk_file(flat_uploaded_vmdk_path)
+
+ cookies = \
+ self._session._get_vim().client.options.transport.cookiejar
+ _fetch_image_on_esx_datastore()
+
+ if disk_type == "sparse":
+ # Copy the sparse virtual disk to a thin virtual disk.
+ disk_type = "thin"
+ _copy_virtual_disk()
+ _delete_disk_file(sparse_uploaded_vmdk_path)
+ else:
+ # linked clone base disk exists
+ if disk_type == "sparse":
+ disk_type = "thin"
+
+ # Attach the vmdk uploaded to the VM.
+ self._volumeops.attach_disk_to_vm(
+ vm_ref, instance,
+ adapter_type, disk_type, uploaded_vmdk_path,
+ vmdk_file_size_in_kb, linked_clone)
+ else:
+ # Attach the root disk to the VM.
+ root_disk = driver.block_device_info_get_mapping(
+ block_device_info)[0]
+ connection_info = root_disk['connection_info']
+ self._volumeops.attach_volume(connection_info, instance['name'],
+ self._default_root_device)
def _power_on_vm():
"""Power on the VM."""
@@ -362,9 +436,9 @@ class VMwareVMOps(object):
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -378,7 +452,7 @@ class VMwareVMOps(object):
disk_type, unit_number) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices)
datastore_name = vm_util.split_datastore_path(
- vmdk_file_path_before_snapshot)[0]
+ vmdk_file_path_before_snapshot)[0]
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
@@ -395,7 +469,7 @@ class VMwareVMOps(object):
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
- name="%s-snapshot" % instance.name,
+ name="%s-snapshot" % instance['name'],
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
@@ -437,13 +511,14 @@ class VMwareVMOps(object):
random_name = str(uuid.uuid4())
dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name,
"vmware-tmp/%s.vmdk" % random_name)
- dc_ref = self._get_datacenter_name_and_ref()[0]
+ dc_ref = self._get_datacenter_ref_and_name()[0]
def _copy_vmdk_content():
# Copy the contents of the disk ( or disks, if there were snapshots
# done earlier) to a temporary vmdk file.
copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
- adapter_type)
+ adapter_type,
+ disk_type)
LOG.debug(_('Copying disk data before snapshot of the VM'),
instance=instance)
copy_disk_task = self._session._call_method(
@@ -476,7 +551,7 @@ class VMwareVMOps(object):
adapter_type=adapter_type,
image_version=1,
host=self._session._host_ip,
- data_center_name=self._get_datacenter_name_and_ref()[1],
+ data_center_name=self._get_datacenter_ref_and_name()[1],
datastore_name=datastore_name,
cookies=cookies,
file_path="vmware-tmp/%s-flat.vmdk" % random_name)
@@ -509,9 +584,9 @@ class VMwareVMOps(object):
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
self.plug_vifs(instance, network_info)
@@ -552,6 +627,38 @@ class VMwareVMOps(object):
self._session._wait_for_task(instance['uuid'], reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
+ def _delete(self, instance, network_info):
+ """
+ Destroy a VM instance. Steps followed are:
+ 1. Power off the VM, if it is in poweredOn state.
+ 2. Destroy the VM.
+ """
+ try:
+ vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance['name'])
+ if vm_ref is None:
+ LOG.debug(_("instance not present"), instance=instance)
+ return
+
+ self.power_off(instance)
+
+ try:
+ LOG.debug(_("Destroying the VM"), instance=instance)
+ destroy_task = self._session._call_method(
+ self._session._get_vim(),
+ "Destroy_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], destroy_task)
+ LOG.debug(_("Destroyed the VM"), instance=instance)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:delete, got this exception"
+ " while destroying the VM: %s") % str(excep),
+ instance=instance)
+
+ if network_info:
+ self.unplug_vifs(instance, network_info)
+ except Exception, exc:
+ LOG.exception(exc, instance=instance)
+
def destroy(self, instance, network_info, destroy_disks=True):
"""
Destroy a VM instance. Steps followed are:
@@ -560,7 +667,8 @@ class VMwareVMOps(object):
3. Delete the contents of the folder holding the VM related data.
"""
try:
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance['name'])
if vm_ref is None:
LOG.debug(_("instance not present"), instance=instance)
return
@@ -592,14 +700,15 @@ class VMwareVMOps(object):
try:
LOG.debug(_("Unregistering the VM"), instance=instance)
self._session._call_method(self._session._get_vim(),
- "UnregisterVM", vm_ref)
+ "UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
" while un-registering the VM: %s") % str(excep),
instance=instance)
- self.unplug_vifs(instance, network_info)
+ if network_info:
+ self.unplug_vifs(instance, network_info)
# Delete the folder holding the VM related content on
# the datastore.
@@ -617,7 +726,8 @@ class VMwareVMOps(object):
vim,
"DeleteDatastoreFile_Task",
vim.get_service_content().fileManager,
- name=dir_ds_compliant_path)
+ name=dir_ds_compliant_path,
+ datacenter=self._get_datacenter_ref_and_name()[0])
self._session._wait_for_task(instance['uuid'], delete_task)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
@@ -642,9 +752,9 @@ class VMwareVMOps(object):
def suspend(self, instance):
"""Suspend the specified instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -658,17 +768,17 @@ class VMwareVMOps(object):
LOG.debug(_("Suspended the VM"), instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
- reason = _("instance is powered off and can not be suspended.")
+ reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
-
- LOG.debug(_("VM was already in suspended state. So returning "
- "without doing anything"), instance=instance)
+ else:
+ LOG.debug(_("VM was already in suspended state. So returning "
+ "without doing anything"), instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -684,9 +794,263 @@ class VMwareVMOps(object):
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
+ def rescue(self, context, instance, network_info, image_meta):
+ """Rescue the specified instance.
+
+ - shutdown the instance VM.
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue).
+
+ """
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.power_off(instance)
+ instance['name'] = instance['name'] + self._rescue_suffix
+ self.spawn(context, instance, image_meta, network_info)
+
+ # Attach vmdk to the rescue VM
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ vmdk_path, controller_key, adapter_type, disk_type, unit_number \
+ = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)
+ # Figure out the correct unit number
+ unit_number = unit_number + 1
+ rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance['name'])
+ self._volumeops.attach_disk_to_vm(
+ rescue_vm_ref, instance,
+ adapter_type, disk_type, vmdk_path,
+ controller_key=controller_key,
+ unit_number=unit_number)
+
+ def unrescue(self, instance):
+ """Unrescue the specified instance."""
+ instance_orig_name = instance['name']
+ instance['name'] = instance['name'] + self._rescue_suffix
+ self.destroy(instance, None)
+ instance['name'] = instance_orig_name
+ self.power_on(instance)
+
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ # Only PoweredOn VMs can be powered off.
+ if pwr_state == "poweredOn":
+ LOG.debug(_("Powering off the VM"), instance=instance)
+ poweroff_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOffVM_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], poweroff_task)
+ LOG.debug(_("Powered off the VM"), instance=instance)
+ # Raise Exception if VM is suspended
+ elif pwr_state == "suspended":
+ reason = _("instance is suspended and cannot be powered off.")
+ raise exception.InstancePowerOffFailure(reason=reason)
+ else:
+ LOG.debug(_("VM was already in powered off state. So returning "
+ "without doing anything"), instance=instance)
+
+ def power_on(self, instance):
+ """Power on the specified instance."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ if pwr_state == "poweredOn":
+ LOG.debug(_("VM was already in powered on state. So returning "
+ "without doing anything"), instance=instance)
+ # Only PoweredOff and Suspended VMs can be powered on.
+ else:
+ LOG.debug(_("Powering on the VM"), instance=instance)
+ poweron_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], poweron_task)
+ LOG.debug(_("Powered on the VM"), instance=instance)
+
+ def _get_orig_vm_name_label(self, instance):
+ return instance['name'] + '-orig'
+
+ def _update_instance_progress(self, context, instance, step, total_steps):
+ """Update instance progress percent to reflect current step number
+ """
+ # Divide the action's workflow into discrete steps and "bump" the
+ # instance's progress field as each step is completed.
+ #
+ # For a first cut this should be fine, however, for large VM images,
+ # the clone disk step begins to dominate the equation. A
+ # better approximation would use the percentage of the VM image that
+ # has been streamed to the destination host.
+ progress = round(float(step) / total_steps * 100)
+ instance_uuid = instance['uuid']
+ LOG.debug(_("Updating instance '%(instance_uuid)s' progress to"
+ " %(progress)d") % locals(), instance=instance)
+ self._virtapi.instance_update(context, instance_uuid,
+ {'progress': progress})
+
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ # 0. Zero out the progress to begin
+ self._update_instance_progress(context, instance,
+ step=0,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['name'])
+ host_ref = self._get_host_ref_from_name(dest)
+ if host_ref is None:
+ raise exception.HostNotFound(host=dest)
+
+ # 1. Power off the instance
+ self.power_off(instance)
+ self._update_instance_progress(context, instance,
+ step=1,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ # 2. Rename the original VM with suffix '-orig'
+ name_label = self._get_orig_vm_name_label(instance)
+ LOG.debug(_("Renaming the VM to %s") % name_label,
+ instance=instance)
+ rename_task = self._session._call_method(
+ self._session._get_vim(),
+ "Rename_Task", vm_ref, newName=name_label)
+ self._session._wait_for_task(instance['uuid'], rename_task)
+ LOG.debug(_("Renamed the VM to %s") % name_label,
+ instance=instance)
+ self._update_instance_progress(context, instance,
+ step=2,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ # Get the clone vm spec
+ ds_ref = vm_util.get_datastore_ref_and_name(self._session)[0]
+ client_factory = self._session._get_vim().client.factory
+ rel_spec = vm_util.relocate_vm_spec(client_factory, ds_ref, host_ref)
+ clone_spec = vm_util.clone_vm_spec(client_factory, rel_spec)
+ vm_folder_ref, res_pool_ref = self._get_vmfolder_and_res_pool_refs()
+
+ # 3. Clone VM on ESX host
+ LOG.debug(_("Cloning VM to host %s") % dest, instance=instance)
+ vm_clone_task = self._session._call_method(
+ self._session._get_vim(),
+ "CloneVM_Task", vm_ref,
+ folder=vm_folder_ref,
+ name=instance['name'],
+ spec=clone_spec)
+ self._session._wait_for_task(instance['uuid'], vm_clone_task)
+ LOG.debug(_("Cloned VM to host %s") % dest, instance=instance)
+ self._update_instance_progress(context, instance,
+ step=3,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ def confirm_migration(self, migration, instance, network_info):
+ """Confirms a resize, destroying the source VM."""
+ instance_name = self._get_orig_vm_name_label(instance)
+ # Destroy the original VM.
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
+ if vm_ref is None:
+ LOG.debug(_("instance not present"), instance=instance)
+ return
+
+ try:
+ LOG.debug(_("Destroying the VM"), instance=instance)
+ destroy_task = self._session._call_method(
+ self._session._get_vim(),
+ "Destroy_Task", vm_ref)
+ self._session._wait_for_task(instance['uuid'], destroy_task)
+ LOG.debug(_("Destroyed the VM"), instance=instance)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
+ "exception while destroying the VM: %s") % str(excep))
+
+ if network_info:
+ self.unplug_vifs(instance, network_info)
+
+ def finish_revert_migration(self, instance):
+ """Finish reverting a resize, powering back on the instance."""
+ # The original vm was suffixed with '-orig'; find it using
+ # the old suffix, remove the suffix, then power it back on.
+ name_label = self._get_orig_vm_name_label(instance)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, name_label)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=name_label)
+
+ LOG.debug(_("Renaming the VM from %s") % name_label,
+ instance=instance)
+ rename_task = self._session._call_method(
+ self._session._get_vim(),
+ "Rename_Task", vm_ref, newName=instance['name'])
+ self._session._wait_for_task(instance['uuid'], rename_task)
+ LOG.debug(_("Renamed the VM from %s") % name_label,
+ instance=instance)
+ self.power_on(instance)
+
+ def finish_migration(self, context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance=False):
+ """Completes a resize, turning on the migrated instance."""
+ # 4. Start VM
+ self.power_on(instance)
+ self._update_instance_progress(context, instance,
+ step=4,
+ total_steps=RESIZE_TOTAL_STEPS)
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method, block_migration=False):
+ """Spawning live_migration operation for distributing high-load."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_ref.name)
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance_ref.name)
+ host_ref = self._get_host_ref_from_name(dest)
+ if host_ref is None:
+ raise exception.HostNotFound(host=dest)
+
+ LOG.debug(_("Migrating VM to host %s") % dest, instance=instance_ref)
+ try:
+ vm_migrate_task = self._session._call_method(
+ self._session._get_vim(),
+ "MigrateVM_Task", vm_ref,
+ host=host_ref,
+ priority="defaultPriority")
+ self._session._wait_for_task(instance_ref['uuid'], vm_migrate_task)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ recover_method(context, instance_ref, dest, block_migration)
+ post_method(context, instance_ref, dest, block_migration)
+ LOG.debug(_("Migrated VM to host %s") % dest, instance=instance_ref)
+
+ def poll_rebooting_instances(self, timeout, instances):
+ """Poll for rebooting instances."""
+ ctxt = nova_context.get_admin_context()
+
+ instances_info = dict(instance_count=len(instances),
+ timeout=timeout)
+
+ if instances_info["instance_count"] > 0:
+ LOG.info(_("Found %(instance_count)d hung reboots "
+ "older than %(timeout)d seconds") % instances_info)
+
+ for instance in instances:
+            LOG.info(_("Automatically hard rebooting %s") % instance['uuid'])
+ self.compute_api.reboot(ctxt, instance, "HARD")
+
def get_info(self, instance):
"""Return data about the VM instance."""
- vm_ref = self._get_vm_ref_from_the_name(instance['name'])
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['name'])
@@ -722,9 +1086,9 @@ class VMwareVMOps(object):
def get_console_output(self, instance):
"""Return snapshot of console."""
- vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=instance.id)
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -741,6 +1105,24 @@ class VMwareVMOps(object):
else:
return ""
+ def get_vnc_console(self, instance):
+ """Return connection info for a vnc console."""
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ return {'host': CONF.vmwareapi_host_ip,
+ 'port': self._get_vnc_port(vm_ref),
+ 'internal_access_path': None}
+
+ @staticmethod
+ def _get_vnc_port(vm_ref):
+        """Return the VNC port for a VM."""
+ vm_id = int(vm_ref.value.replace('vm-', ''))
+ port = CONF.vnc_port + vm_id % CONF.vnc_port_total
+
+ return port
+
def _set_machine_id(self, client_factory, instance, network_info):
"""
Set the machine id of the VM for guest tools to pick up and reconfigure
@@ -786,12 +1168,56 @@ class VMwareVMOps(object):
"with ip - %(ip_addr)s") % {'ip_addr': ip_v4['ip']},
instance=instance)
- def _get_datacenter_name_and_ref(self):
+ def _set_vnc_config(self, client_factory, instance, port, password):
+ """
+ Set the vnc configuration of the VM.
+ """
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance['name'])
+ if vm_ref is None:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ vnc_config_spec = vm_util.get_vnc_config_spec(
+ client_factory, port, password)
+
+ LOG.debug(_("Reconfiguring VM instance to enable vnc on "
+ "port - %(port)s") % {'port': port},
+ instance=instance)
+ reconfig_task = self._session._call_method(self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vnc_config_spec)
+ self._session._wait_for_task(instance['uuid'], reconfig_task)
+ LOG.debug(_("Reconfigured VM instance to enable vnc on "
+ "port - %(port)s") % {'port': port},
+ instance=instance)
+
+ def _get_datacenter_ref_and_name(self):
"""Get the datacenter name and the reference."""
dc_obj = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name"])
return dc_obj[0].obj, dc_obj[0].propSet[0].val
+ def _get_host_ref_from_name(self, host_name):
+ """Get reference to the host with the name specified."""
+ host_objs = self._session._call_method(vim_util, "get_objects",
+ "HostSystem", ["name"])
+ for host in host_objs:
+ if host.propSet[0].val == host_name:
+ return host.obj
+ return None
+
+ def _get_vmfolder_and_res_pool_refs(self):
+        """Get the VM folder and resource pool refs from the datacenter."""
+ dc_objs = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["vmFolder"])
+ # There is only one default datacenter in a standalone ESX host
+ vm_folder_ref = dc_objs[0].propSet[0].val
+
+ # Get the resource pool. Taking the first resource pool coming our
+ # way. Assuming that is the default resource pool.
+ res_pool_ref = self._session._call_method(vim_util, "get_objects",
+ "ResourcePool")[0].obj
+ return vm_folder_ref, res_pool_ref
+
def _path_exists(self, ds_browser, ds_path):
"""Check if the path exists on the datastore."""
search_task = self._session._call_method(self._session._get_vim(),
@@ -812,6 +1238,32 @@ class VMwareVMOps(object):
return False
return True
+ def _path_file_exists(self, ds_browser, ds_path, file_name):
+        """Check if the path and file exist on the datastore."""
+ client_factory = self._session._get_vim().client.factory
+ search_spec = vm_util.search_datastore_spec(client_factory, file_name)
+ search_task = self._session._call_method(self._session._get_vim(),
+ "SearchDatastore_Task",
+ ds_browser,
+ datastorePath=ds_path,
+ searchSpec=search_spec)
+ # Wait till the state changes from queued or running.
+ # If an error state is returned, it means that the path doesn't exist.
+ while True:
+ task_info = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ search_task, "Task", "info")
+ if task_info.state in ['queued', 'running']:
+ time.sleep(2)
+ continue
+ break
+ if task_info.state == "error":
+ return False, False
+
+ file_exists = (getattr(task_info.result, 'file', False) and
+ task_info.result.file[0].path == file_name)
+ return True, file_exists
+
def _mkdir(self, ds_path):
"""
Creates a directory at the path specified. If it is just "NAME",
@@ -824,14 +1276,23 @@ class VMwareVMOps(object):
name=ds_path, createParentDirectories=False)
LOG.debug(_("Created directory with path %s") % ds_path)
- def _get_vm_ref_from_the_name(self, vm_name):
- """Get reference to the VM with the name specified."""
- vms = self._session._call_method(vim_util, "get_objects",
- "VirtualMachine", ["name"])
- for vm in vms:
- if vm.propSet[0].val == vm_name:
- return vm.obj
- return None
+ def _check_if_folder_file_exists(self, ds_ref, ds_name,
+ folder_name, file_name):
+ ds_browser = vim_util.get_dynamic_property(
+ self._session._get_vim(),
+ ds_ref,
+ "Datastore",
+ "browser")
+ # Check if the folder exists or not. If not, create one
+ # Check if the file exists or not.
+ folder_path = vm_util.build_datastore_path(ds_name, folder_name)
+ folder_exists, file_exists = self._path_file_exists(ds_browser,
+ folder_path,
+ file_name)
+ if not folder_exists:
+ self._mkdir(vm_util.build_datastore_path(ds_name, folder_name))
+
+ return file_exists
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
@@ -872,3 +1333,21 @@ class VMwareVMOps(object):
interfaces.append(device.key)
return interfaces
+
+ @staticmethod
+ def _volume_in_mapping(mount_device, block_device_info):
+ block_device_list = [block_device.strip_dev(vol['mount_device'])
+ for vol in
+ driver.block_device_info_get_mapping(
+ block_device_info)]
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ block_device_list.append(
+ block_device.strip_dev(swap['device_name']))
+ block_device_list += [block_device.strip_dev(ephemeral['device_name'])
+ for ephemeral in
+ driver.block_device_info_get_ephemerals(
+ block_device_info)]
+
+ LOG.debug(_("block_device_list %s"), block_device_list)
+ return block_device.strip_dev(mount_device) in block_device_list
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 7c4480ea0..e8510b36e 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -1,5 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack LLC.
#
@@ -17,7 +18,6 @@
"""
Utility functions for Image transfer.
"""
-import StringIO
from nova import exception
from nova.image import glance
@@ -56,7 +56,7 @@ def start_transfer(context, read_file_handle, data_size,
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
# In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
- # to be sure of the status of the image on glnace changing to active.
+ # to be sure of the status of the image on glance changing to active.
# The GlanceWriteThread handles the same for us.
elif image_service and image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
@@ -93,9 +93,8 @@ def fetch_image(context, image, instance, **kwargs):
(image_service, image_id) = glance.get_remote_image_service(context, image)
metadata = image_service.show(context, image_id)
file_size = int(metadata['size'])
- f = StringIO.StringIO()
- image_service.download(context, image_id, f)
- read_file_handle = read_write_util.GlanceFileRead(f)
+ read_iter = image_service.download(context, image_id)
+ read_file_handle = read_write_util.GlanceFileRead(read_iter)
write_file_handle = read_write_util.VMwareHTTPWriteFile(
kwargs.get("host"),
kwargs.get("data_center_name"),
@@ -122,10 +121,9 @@ def upload_image(context, image, instance, **kwargs):
file_size = read_file_handle.get_size()
(image_service, image_id) = glance.get_remote_image_service(context, image)
# The properties and other fields that we need to set for the image.
- image_metadata = {"is_public": True,
- "disk_format": "vmdk",
+ image_metadata = {"disk_format": "vmdk",
"container_format": "bare",
- "type": "vmdk",
+ "size": file_size,
"properties": {"vmware_adaptertype":
kwargs.get("adapter_type"),
"vmware_ostype": kwargs.get("os_type"),
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index 5ec389f80..922d2135b 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -18,7 +18,6 @@
Management class for Storage-related functions (attach, detach, etc).
"""
-from nova import context
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -110,7 +109,8 @@ class VMwareVolumeOps(object):
iqn = volume_util.get_host_iqn(self._session)
return {
'ip': CONF.vmwareapi_host_ip,
- 'initiator': iqn
+ 'initiator': iqn,
+ 'host': CONF.vmwareapi_host_ip
}
def attach_volume(self, connection_info, instance, mountpoint):