author     Salvatore Orlando <salvatore.orlando@eu.citrix.com>  2011-03-09 12:52:10 +0000
committer  Salvatore Orlando <salvatore.orlando@eu.citrix.com>  2011-03-09 12:52:10 +0000
commit     b6ea59aa3b71732ae65bf87aa59b41a7d25f8ec2 (patch)
tree       acfe5e2e7dfd8c8fe5c4a1359a56aedf9e06b8e2 /nova/virt
parent     748b3102320a9de3444b067aa783e8f3d7bc5f5c (diff)
parent     f42fda8566383bf0271e5b79e1385c41731639b9 (diff)
Merged with trunk
Updated exception handling according to spawn refactoring
Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/fake.py              31
-rw-r--r--  nova/virt/xenapi/fake.py        3
-rw-r--r--  nova/virt/xenapi/vm_utils.py  112
-rw-r--r--  nova/virt/xenapi/vmops.py     319
-rw-r--r--  nova/virt/xenapi_conn.py       19
5 files changed, 340 insertions, 144 deletions
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 4346dffc1..c744acf91 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -139,6 +139,24 @@ class FakeConnection(object):
"""
pass
+ def get_host_ip_addr(self):
+ """
+ Retrieves the IP address of the dom0
+ """
+ pass
+
+ def resize(self, instance, flavor):
+ """
+ Resizes/Migrates the specified instance.
+
+ The flavor parameter determines whether or not the instance RAM and
+ disk space are modified, and if so, to what size.
+
+ The work will be done asynchronously. This function returns a task
+ that allows the caller to detect when it is complete.
+ """
+ pass
+
def set_admin_password(self, instance, new_pass):
"""
Set the root password on the specified instance.
@@ -179,6 +197,19 @@ class FakeConnection(object):
"""
pass
+ def migrate_disk_and_power_off(self, instance, dest):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ pass
+
+ def attach_disk(self, instance, disk_info):
+ """
+ Attaches the disk to an instance given the metadata disk_info
+ """
+ pass
+
def pause(self, instance, callback):
"""
Pause the specified instance.
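For orientation, a minimal caller sketch of how the two new transfer methods pair up during a resize/migration; `driver` stands for any connection object implementing the interface above, and `instance`/`dest` are caller-supplied:

    # Hedged caller sketch (not part of this commit):
    def move_instance_disk(driver, instance, dest):
        # phase 1: copy the disk to dest, powering the instance off at the end
        disk_info = driver.migrate_disk_and_power_off(instance, dest)
        # phase 2: re-attach the transferred disk from its metadata
        return driver.attach_disk(instance, disk_info)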
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index e1ae03e70..ba12d4d3a 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -290,6 +290,9 @@ class SessionBase(object):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
+ def host_call_plugin(*args):
+ return 'herp'
+
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
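The host_call_plugin stub gives the fake session a canned reply for plugin invocations, so the new plugin-backed paths (glance VHD transfer, migration) can be unit-tested without a dom0. The same test-double idea in isolation, with hypothetical names:

    # Self-contained illustration of the stubbing idea (hypothetical class):
    class FakePluginSession(object):
        def host_call_plugin(self, *args):
            # real plugins run in dom0; tests only need a canned return value
            return 'herp'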
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 0baeac8ed..df9b036d0 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -264,16 +264,31 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
+ def get_vdi_for_vm_safely(cls, session, vm_ref):
+ vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+ if vdi_refs is None:
+ raise Exception(_("No VDIs found for VM %s") % vm_ref)
+ else:
+ num_vdis = len(vdi_refs)
+ if num_vdis != 1:
+ raise Exception(
+ _("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
+
+ vdi_ref = vdi_refs[0]
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, vdi_rec
+
+ @classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
- """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
- Snapshot VHD
- """
+ """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
% locals())
- vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
@@ -281,7 +296,8 @@ class VMHelper(HelperBase):
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
template_vm_ref = session.wait_for_task(task, instance_id)
- template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
+ template_vdi_rec = cls.get_vdi_for_vm_safely(session,
+ template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
LOG.debug(_('Created snapshot %(template_vm_ref)s from'
@@ -296,6 +312,24 @@ class VMHelper(HelperBase):
return template_vm_ref, template_vdi_uuids
@classmethod
+ def get_sr(cls, session, sr_label='slices'):
+ """Finds the SR named by the given name label and returns
+ the UUID"""
+ return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
+
+ @classmethod
+ def get_sr_path(cls, session):
+ """Return the path to our storage repository
+
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+ @classmethod
def upload_image(cls, session, instance_id, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
@@ -309,7 +343,7 @@ class VMHelper(HelperBase):
'image_id': image_id,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port,
- 'sr_path': get_sr_path(session)}
+ 'sr_path': cls.get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
@@ -343,23 +377,23 @@ class VMHelper(HelperBase):
try:
sr_ref = safe_find_sr(session)
- # NOTE(sirp): The Glance plugin runs under Python 2.4 which
- # does not have the `uuid` module. To work around this, we
- # generate the uuids here (under Python 2.6+) and pass them
- # as arguments
+ # NOTE(sirp): The Glance plugin runs under Python 2.4
+ # which does not have the `uuid` module. To work around this,
+ # we generate the uuids here (under Python 2.6+) and
+ # pass them as arguments
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
params = {'image_id': image,
'glance_host': FLAGS.glance_host,
'glance_port': FLAGS.glance_port,
'uuid_stack': uuid_stack,
- 'sr_path': get_sr_path(session)}
+ 'sr_path': cls.get_sr_path(session)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
vdi_uuid = session.wait_for_task(task, instance_id)
- scan_sr(session, instance_id, sr_ref)
+ cls.scan_sr(session, instance_id, sr_ref)
# Set the name-label to ease debugging
vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
@@ -408,7 +442,6 @@ class VMHelper(HelperBase):
name_label = get_name_label_for_image(image)
vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
-
with_vdi_attached_here(session, vdi, False,
lambda dev:
_stream_disk(dev, image_type,
@@ -464,19 +497,21 @@ class VMHelper(HelperBase):
"%(image_id)s, instance %(instance_id)s") % locals())
def determine_from_glance():
- glance_type2nova_type = {'machine': ImageType.DISK,
- 'raw': ImageType.DISK_RAW,
- 'vhd': ImageType.DISK_VHD,
- 'kernel': ImageType.KERNEL,
- 'ramdisk': ImageType.RAMDISK}
+ glance_disk_format2nova_type = {'ami': ImageType.DISK,
+ 'raw': ImageType.DISK_RAW,
+ 'vhd': ImageType.DISK_VHD,
+ 'aki': ImageType.KERNEL,
+ 'ari': ImageType.RAMDISK}
+
client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
meta = client.get_image_meta(instance.image_id)
- type_ = meta['type']
+ disk_format = meta['disk_format']
try:
- return glance_type2nova_type[type_]
+ return glance_disk_format2nova_type[disk_format]
except KeyError:
raise exception.NotFound(
- _("Unrecognized image type '%(type_)s'") % locals())
+ _("Unrecognized disk_format '%(disk_format)s'")
+ % locals())
def determine_from_instance():
if instance.kernel_id:
@@ -640,6 +675,21 @@ class VMHelper(HelperBase):
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
+ @classmethod
+ def scan_sr(cls, session, instance_id=None, sr_ref=None):
+ """Scans the SR specified by sr_ref"""
+ if sr_ref:
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
+ task = session.call_xenapi('Async.SR.scan', sr_ref)
+ session.wait_for_task(task, instance_id)
+
+ @classmethod
+ def scan_default_sr(cls, session):
+ """Looks for the system default SR and triggers a re-scan"""
+ #FIXME(sirp/mdietz): refactor scan_default_sr in there
+ sr_ref = cls.get_sr(session)
+ session.call_xenapi('SR.scan', sr_ref)
+
def get_rrd(host, uuid):
"""Return the VM RRD XML as a string"""
@@ -682,12 +732,6 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
-def scan_sr(session, instance_id, sr_ref):
- LOG.debug(_("Re-scanning SR %s"), sr_ref)
- task = session.call_xenapi('Async.SR.scan', sr_ref)
- session.wait_for_task(task, instance_id)
-
-
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
@@ -712,7 +756,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
" %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
- scan_sr(session, instance_id, sr_ref)
+ VMHelper.scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
@@ -769,18 +813,6 @@ def find_sr(session):
return None
-def get_sr_path(session):
- """Return the path to our storage repository
-
- This is used when we're dealing with VHDs directly, either by taking
- snapshots or by restoring an image in the DISK_VHD format.
- """
- sr_ref = safe_find_sr(session)
- sr_rec = session.get_xenapi().SR.get_record(sr_ref)
- sr_uuid = sr_rec["uuid"]
- return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
-
-
def remap_vbd_dev(dev):
"""Return the appropriate location for a plugged-in VBD device
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 69242be4c..fc61d71a6 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -22,6 +22,7 @@ Management class for VM-related functions (spawn, reboot, etc).
import json
import M2Crypto
import os
+import pickle
import subprocess
import tempfile
import uuid
@@ -62,13 +63,76 @@ class VMOps(object):
vms.append(rec["name_label"])
return vms
+ def _start(self, instance, vm_ref=None):
+ """Power on a VM instance"""
+ if not vm_ref:
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ if vm_ref is None:
+ raise exception.Error(_('Attempted to power on non-existent instance;'
+ ' bad instance id %s') % instance.id)
+ LOG.debug(_("Starting instance %s"), instance.name)
+ self._session.call_xenapi('VM.start', vm_ref, False, False)
+
+ def create_disk(self, instance):
+ user = AuthManager().get_user(instance.user_id)
+ project = AuthManager().get_project(instance.project_id)
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+ #if fetch image fails the exception will be handled in spawn
+ vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
+ instance.image_id, user, project, disk_image_type)
+ return (vdi_uuid, disk_image_type)
+
def spawn(self, instance):
+ try:
+ vdi_uuid = disk_image_type = None
+ (vdi_uuid, disk_image_type) = self.create_disk(instance)
+ self._spawn_with_disk(instance, vdi_uuid=vdi_uuid)
+ except BaseException as spawn_error:
+
+ LOG.exception(_("instance %s: Failed to spawn"),
+ instance.id, exc_info=sys.exc_info())
+ LOG.debug(_('Instance %s failed to spawn - performing clean-up'),
+ instance.id)
+ vdis = {}
+ if vdi_uuid:
+ vdis[disk_image_type] = vdi_uuid
+ #extract VDI uuid from spawn error
+ if len(spawn_error.args) > 0:
+ last_arg = spawn_error.args[len(spawn_error.args) - 1]
+ if isinstance(last_arg, dict):
+ for item in last_arg:
+ vdis[item] = last_arg[item]
+ LOG.debug(_("VDIS to remove:%s"), vdis)
+ remove_from_dom0 = False
+ for vdi_type in vdis:
+ vdi_to_remove = vdis[vdi_type]
+ if vdi_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ remove_from_dom0 = True
+ try:
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+ vdi_to_remove)
+ LOG.debug(_('Removing VDI %(vdi_ref)s' +
+ ' (uuid:%(vdi_to_remove)s)'), locals())
+ except:
+ #vdi already deleted
+ LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove)
+ continue
+ VMHelper.destroy_vdi(self._session, vdi_ref)
+ if remove_from_dom0:
+ LOG.debug(_("Removing kernel/ramdisk files from dom0"))
+ self._destroy_kernel_ramdisk_plugin_call(
+ vdis[ImageType.KERNEL], vdis[ImageType.RAMDISK])
+
+ #re-throw the error
+ raise spawn_error
+
+ def _spawn_with_disk(self, instance, vdi_uuid):
"""Create VM instance"""
instance_name = instance.name
vm = VMHelper.lookup(self._session, instance_name)
if vm is not None:
raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance_name)
+ ' non-unique name %s') % instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
@@ -82,39 +146,36 @@ class VMOps(object):
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
+ kernel = ramdisk = pv_kernel = None
+
+ # Are we building from a pre-existing disk?
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+
disk_image_type = VMHelper.determine_disk_image_type(instance)
- vdi_uuid = None
- vdi_ref = None
- kernel = None
- ramdisk = None
- try:
- vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
- instance.image_id, user, project, disk_image_type)
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
-
- pv_kernel = False
- if disk_image_type == ImageType.DISK_RAW:
- #Have a look at the VDI and see if it has a PV kernel
- pv_kernel = VMHelper.lookup_image(self._session, instance.id,
- vdi_ref)
- elif disk_image_type == ImageType.DISK_VHD:
- # TODO(sirp): Assuming PV for now; this will need to be
- # configurable as Windows will use HVM.
- pv_kernel = True
-
- if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session, instance.id,
- instance.kernel_id, user, project,
- ImageType.KERNEL)
-
- if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session, instance.id,
- instance.ramdisk_id, user, project,
- ImageType.RAMDISK)
+ if disk_image_type == ImageType.DISK_RAW:
+ # Have a look at the VDI and see if it has a PV kernel
+ pv_kernel = VMHelper.lookup_image(self._session, instance.id,
+ vdi_ref)
+ elif disk_image_type == ImageType.DISK_VHD:
+ # TODO(sirp): Assuming PV for now; this will need to be
+ # configurable as Windows will use HVM.
+ pv_kernel = True
+
+ if instance.kernel_id:
+ kernel = VMHelper.fetch_image(self._session, instance.id,
+ instance.kernel_id, user, project, ImageType.KERNEL)
+
+ if instance.ramdisk_id:
+ ramdisk = VMHelper.fetch_image(self._session, instance.id,
+ instance.ramdisk_id, user, project, ImageType.RAMDISK)
+
+ try:
vm_ref = VMHelper.create_vm(self._session,
instance, kernel, ramdisk, pv_kernel)
- except BaseException as spawn_error:
+ except BaseException as vm_create_error:
+ # if the spawn process fails here it will be necessary to
+ # clean up kernel and ramdisk (VDIs and files in dom0)
def _vdi_uuid(filename):
#Note: we assume the file name is the same as
@@ -127,47 +188,18 @@ class VMOps(object):
return splits[n_splits - 1]
return None
- LOG.exception(_("instance %s: Failed to spawn"),
+ LOG.exception(_("instance %s: Failed to spawn - " +
+ "Unable to create VM"),
instance.id, exc_info=sys.exc_info())
- LOG.debug(_('Instance %s failed to spawn - performing clean-up'),
- instance.id)
vdis = {}
- if vdi_uuid:
- vdis[ImageType.DISK] = vdi_uuid
vdis[ImageType.KERNEL] = kernel and _vdi_uuid(kernel) or None
vdis[ImageType.RAMDISK] = ramdisk and _vdi_uuid(ramdisk) or None
- #extract VDI uuid from spawn error
- if len(spawn_error.args) > 0:
- last_arg = spawn_error.args[len(spawn_error.args) - 1]
- if isinstance(last_arg, dict):
- for item in last_arg:
- vdis[item] = last_arg[item]
- LOG.debug(_("VDIS to remove:%s"), vdis)
- remove_from_dom0 = False
- for vdi_type in vdis:
- vdi_uuid = vdis[vdi_type]
- if vdi_type in (ImageType.KERNEL, ImageType.RAMDISK):
- remove_from_dom0 = True
- try:
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
- vdi_uuid)
- LOG.debug(_('Removing VDI %(vdi_ref)s' +
- '(uuid:%(vdi_uuid)s)'),
- locals())
- except:
- #vdi already deleted
- LOG.debug(_("Skipping VDI destroy for %s"), vdi_uuid)
- continue
- VMHelper.destroy_vdi(self._session, vdi_ref)
- if remove_from_dom0:
- LOG.debug(_("Removing kernel/ramdisk files from dom0"))
- self._destroy_kernel_ramdisk_plugin_call(
- vdis[ImageType.KERNEL], vdis[ImageType.RAMDISK])
+ vm_create_error.args = vm_create_error.args + (vdis,)
+ raise vm_create_error
- #re-throw the error
- raise spawn_error
+ VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=vdi_ref, userdevice=0, bootable=True)
- VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
# inject_network_info and create vifs
networks = self.inject_network_info(instance)
self.create_vifs(instance, networks)
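The two handlers above cooperate through the exception object itself: _spawn_with_disk appends a dict of VDI uuids to vm_create_error.args, and the handler in spawn() pops any trailing dict off the caught error to learn what to clean up. A minimal self-contained sketch of that convention (all names hypothetical):

    # Illustrative only: pack cleanup hints into the exception on the way up
    def create_vm_step():
        created = {'kernel': 'uuid-k', 'ramdisk': 'uuid-r'}  # hypothetical
        try:
            raise RuntimeError('VM.create failed')
        except BaseException as err:
            err.args = err.args + (created,)
            raise

    # ...and unpack them in the outer handler, as spawn() does above
    try:
        create_vm_step()
    except BaseException as err:
        if err.args and isinstance(err.args[-1], dict):
            for vdi_type, vdi_uuid in err.args[-1].items():
                print('would destroy VDI %s (type %s)' % (vdi_uuid, vdi_type))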
@@ -228,35 +260,38 @@ class VMOps(object):
"""Refactored out the common code of many methods that receive either
a vm name or a vm instance, and want a vm instance in return.
"""
- vm = None
- try:
- if instance_or_vm.startswith("OpaqueRef:"):
- # Got passed an opaque ref; return it
+ # if instance_or_vm is a string it must be opaque ref or instance name
+ if isinstance(instance_or_vm, basestring):
+ obj = None
+ try:
+ # check for opaque ref
+ obj = self._session.get_xenapi().VM.get_record(instance_or_vm)
return instance_or_vm
- else:
- # Must be the instance name
+ except self.XenAPI.Failure:
+ # wasn't an opaque ref, must be an instance name
instance_name = instance_or_vm
- except (AttributeError, KeyError):
- # Note the the KeyError will only happen with fakes.py
- # Not a string; must be an ID or a vm instance
- if isinstance(instance_or_vm, (int, long)):
- ctx = context.get_admin_context()
- try:
- instance_obj = db.instance_get(ctx, instance_or_vm)
- instance_name = instance_obj.name
- except exception.NotFound:
- # The unit tests screw this up, as they use an integer for
- # the vm name. I'd fix that up, but that's a matter for
- # another bug report. So for now, just try with the passed
- # value
- instance_name = instance_or_vm
- else:
- instance_name = instance_or_vm.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
+
+ # if instance_or_vm is an int/long it must be instance id
+ elif isinstance(instance_or_vm, (int, long)):
+ ctx = context.get_admin_context()
+ try:
+ instance_obj = db.instance_get(ctx, instance_or_vm)
+ instance_name = instance_obj.name
+ except exception.NotFound:
+ # The unit tests screw this up, as they use an integer for
+ # the vm name. I'd fix that up, but that's a matter for
+ # another bug report. So for now, just try with the passed
+ # value
+ instance_name = instance_or_vm
+
+ # otherwise instance_or_vm is an instance object
+ else:
+ instance_name = instance_or_vm.name
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
raise exception.NotFound(
_('Instance not present %s') % instance_name)
- return vm
+ return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting"""
@@ -273,7 +308,7 @@ class VMOps(object):
"start")
def snapshot(self, instance, image_id):
- """ Create snapshot from a running VM instance
+ """Create snapshot from a running VM instance
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
@@ -294,7 +329,20 @@ class VMOps(object):
that will bundle the VHDs together and then push the bundle into
Glance.
"""
+ template_vm_ref = None
+ try:
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ # call plugin to ship snapshot off to glance
+ VMHelper.upload_image(
+ self._session, instance.id, template_vdi_uuids, image_id)
+ finally:
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
+ logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+
+ def _get_snapshot(self, instance):
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
@@ -305,20 +353,89 @@ class VMOps(object):
try:
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
+ return template_vm_ref, template_vdi_uuids
except self.XenAPI.Failure, exc:
logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
% locals())
return
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Copies a VHD from one host machine to another
+
+ :param instance: the instance that owns the VHD in question
+ :param dest: the destination host machine
+ """
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ # The primary VDI becomes the COW after the snapshot, and we can
+ # identify it via the VBD. The base copy is the parent_uuid returned
+ # from the snapshot creation
+
+ base_copy_uuid = cow_uuid = None
+ template_vdi_uuids = template_vm_ref = None
try:
- # call plugin to ship snapshot off to glance
- VMHelper.upload_image(
- self._session, instance.id, template_vdi_uuids, image_id)
+ # transfer the base copy
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ base_copy_uuid = template_vdi_uuids[1]
+ vdi_ref, vm_vdi_rec = \
+ VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
+ cow_uuid = vm_vdi_rec['uuid']
+
+ params = {'host': dest,
+ 'vdi_uuid': base_copy_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session)}
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now power down the instance and transfer the COW VHD
+ self._shutdown(instance, vm_ref, method='clean')
+
+ params = {'host': dest,
+ 'vdi_uuid': cow_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
finally:
- self._destroy(instance, template_vm_ref, shutdown=False,
- destroy_kernel_ramdisk=False)
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
- logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+ # TODO(mdietz): we could also consider renaming these to something
+ # sensible so we don't need to blindly pass around dictionaries
+ return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+
+ def attach_disk(self, instance, base_copy_uuid, cow_uuid):
+ """Links the base copy VHD to the COW via the XAPI plugin"""
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ new_base_copy_uuid = str(uuid.uuid4())
+ new_cow_uuid = str(uuid.uuid4())
+ params = {'instance_id': instance.id,
+ 'old_base_copy_uuid': base_copy_uuid,
+ 'old_cow_uuid': cow_uuid,
+ 'new_base_copy_uuid': new_base_copy_uuid,
+ 'new_cow_uuid': new_cow_uuid,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration',
+ 'move_vhds_into_sr', {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now we rescan the SR so we find the VHDs
+ VMHelper.scan_default_sr(self._session)
+
+ return new_cow_uuid
+
+ def resize(self, instance, flavor):
+ """Resize a running instance by changing it's RAM and disk size """
+ raise NotImplementedError()
def reboot(self, instance):
"""Reboot VM instance"""
@@ -364,11 +481,6 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _start(self, instance, vm):
- """Start an instance"""
- task = self._session.call_xenapi("Async.VM.start", vm, False, False)
- self._session.wait_for_task(task, instance.id)
-
def inject_file(self, instance, b64_path, b64_contents):
"""Write a file to the VM instance. The path to which it is to be
written and the contents of the file need to be supplied; both should
@@ -411,8 +523,7 @@ class VMOps(object):
if hard:
task = self._session.call_xenapi("Async.VM.hard_shutdown", vm)
else:
- task = self._session.call_xenapi("Async.VM.clean_shutdown", vm)
-
+ task = self._session.call_xenapi('Async.VM.clean_shutdown', vm)
self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
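Taken together, the new vmops code moves a disk in two phases: the large base-copy VHD is shipped while the instance is still running, then the instance is shut down and only the small COW layer is transferred. A hedged sketch of the cross-host sequence (manager/scheduler glue omitted; src_vmops and dst_vmops are the VMOps instances on each host):

    # Sketch of the resize/migration flow across hosts:
    disk_info = src_vmops.migrate_disk_and_power_off(instance, dest_ip)
    # disk_info == {'base_copy': <uuid>, 'cow': <uuid>}; both VHDs now on dest
    new_vdi_uuid = dst_vmops.attach_disk(instance,
                                         disk_info['base_copy'],
                                         disk_info['cow'])
    # finish_resize() in xenapi_conn.py (below) then boots from the new disk
    dst_vmops._spawn_with_disk(instance, new_vdi_uuid)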
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index d578c04ec..b63a5f8c3 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -158,10 +158,20 @@ class XenAPIConnection(object):
"""Create VM instance"""
self._vmops.spawn(instance)
+ def finish_resize(self, instance, disk_info):
+ """Completes a resize, turning on the migrated instance"""
+ vdi_uuid = self._vmops.attach_disk(instance, disk_info['base_copy'],
+ disk_info['cow'])
+ self._vmops._spawn_with_disk(instance, vdi_uuid)
+
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance """
self._vmops.snapshot(instance, image_id)
+ def resize(self, instance, flavor):
+ """Resize a VM instance"""
+ raise NotImplementedError()
+
def reboot(self, instance):
"""Reboot VM instance"""
self._vmops.reboot(instance)
@@ -188,6 +198,11 @@ class XenAPIConnection(object):
"""Unpause paused VM instance"""
self._vmops.unpause(instance, callback)
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Transfers the VHD of a running instance to another host, then shuts
+ off the instance and copies over the COW disk"""
+ return self._vmops.migrate_disk_and_power_off(instance, dest)
+
def suspend(self, instance, callback):
"""suspend the specified instance"""
self._vmops.suspend(instance, callback)
@@ -228,6 +243,10 @@ class XenAPIConnection(object):
"""Return link to instance's ajax console"""
return self._vmops.get_ajax_console(instance)
+ def get_host_ip_addr(self):
+ xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ return xs_url.netloc
+
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
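One detail of the new get_host_ip_addr: urlparse's netloc keeps any port present in xenapi_connection_url, so the returned value is not always a bare address. For example:

    # netloc retains the port when the connection URL carries one (Python 2):
    import urlparse
    print(urlparse.urlparse('http://192.168.1.10').netloc)     # 192.168.1.10
    print(urlparse.urlparse('http://192.168.1.10:80').netloc)  # 192.168.1.10:80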