summary refs log tree commit diff stats
path: root/nova
diff options
context:
space:
mode:
authorRick Harris <rconradharris@gmail.com>2012-07-09 21:49:03 +0000
committerRick Harris <rconradharris@gmail.com>2012-07-16 11:49:15 -0400
commitf5583a7840947c9eb5b0fb766daecc4e99dd95fe (patch)
treeda43a5d144cffa4ed3c0f687ce0d19cad5c94674 /nova
parent57e4ee833d1897b5114be193be3b6f7fa38f49bf (diff)
Remove VDI chain limit for migrations.
The strategy for removing the limit is to refactor migrations so that they work nearly identically to snapshots, meaning sequence-numbered VHDs are rsynced over into a staging area and then imported into the SR using the `import_vhds` function. Change-Id: Ibf5c82c52ae7d505ea9e54d64fcc8b8fdce4d05d
Diffstat (limited to 'nova')
-rw-r--r--nova/tests/xenapi/stubs.py2
-rw-r--r--nova/virt/xenapi/vm_utils.py38
-rw-r--r--nova/virt/xenapi/vmops.py219
3 files changed, 128 insertions, 131 deletions
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index ec944d84d..483356424 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -346,7 +346,7 @@ def stub_out_migration_methods(stubs):
pass
stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
- stubs.Set(vmops.VMOps, '_move_disks', fake_move_disks)
+ stubs.Set(vm_utils, 'move_disks', fake_move_disks)
stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
stubs.Set(vm_utils, '_scan_sr', fake_sr)
stubs.Set(vm_utils, 'snapshot_attached_here', fake_snapshot_attached_here)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index e87dbe9b6..c17f53ee8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -897,6 +897,14 @@ def _fetch_using_dom0_plugin_with_retry(context, session, image_id,
raise exception.CouldNotFetchImage(image_id=image_id)
+def _make_uuid_stack():
+ # NOTE(sirp): The XenAPI plugins run under Python 2.4
+ # which does not have the `uuid` module. To work around this,
+ # we generate the uuids here (under Python 2.6+) and
+ # pass them as arguments
+ return [str(uuid.uuid4()) for i in xrange(MAX_VDI_CHAIN_SIZE)]
+
+
def _fetch_vhd_image(context, session, instance, image_id):
"""Tell glance to download an image and put the VHDs into the SR
@@ -905,13 +913,8 @@ def _fetch_vhd_image(context, session, instance, image_id):
LOG.debug(_("Asking xapi to fetch vhd image %(image_id)s"), locals(),
instance=instance)
- # NOTE(sirp): The XenAPI plugins run under Python 2.4
- # which does not have the `uuid` module. To work around this,
- # we generate the uuids here (under Python 2.6+) and
- # pass them as arguments
- uuid_stack = [str(uuid.uuid4()) for i in xrange(MAX_VDI_CHAIN_SIZE)]
params = {'image_id': image_id,
- 'uuid_stack': uuid_stack,
+ 'uuid_stack': _make_uuid_stack(),
'sr_path': get_sr_path(session),
'auth_token': getattr(context, 'auth_token', None)}
@@ -2049,3 +2052,26 @@ def ensure_correct_host(session):
raise
raise Exception(_('This domU must be running on the host '
'specified by xenapi_connection_url'))
+
+
+def move_disks(session, instance, disk_info):
+ """Move and possibly link VHDs via the XAPI plugin."""
+ params = {'instance_uuid': instance['uuid'],
+ 'sr_path': get_sr_path(session),
+ 'uuid_stack': _make_uuid_stack()}
+
+ result = session.call_plugin(
+ 'migration', 'move_vhds_into_sr', {'params': pickle.dumps(params)})
+ imported_vhds = jsonutils.loads(result)
+
+ # Now we rescan the SR so we find the VHDs
+ scan_default_sr(session)
+
+ # Set name-label so we can find if we need to clean up a failed
+ # migration
+ root_uuid = imported_vhds['root']['uuid']
+ set_vdi_name(session, root_uuid, instance.name, 'root')
+
+ root_vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
+
+ return {'uuid': root_uuid, 'ref': root_vdi_ref}
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 687ec0b65..4805ccaf7 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -21,6 +21,7 @@ Management class for VM-related functions (spawn, reboot, etc).
import cPickle as pickle
import functools
+import itertools
import os
import time
import uuid
@@ -203,7 +204,7 @@ class VMOps(object):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance):
- root_vdi = self._move_disks(instance, disk_info)
+ root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
if resize_instance:
self._resize_instance(instance, root_vdi)
@@ -613,12 +614,15 @@ class VMOps(object):
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
- def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path):
+ def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
+ LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
+ locals(), instance=instance)
instance_uuid = instance['uuid']
params = {'host': dest,
'vdi_uuid': vdi_uuid,
'instance_uuid': instance_uuid,
- 'sr_path': sr_path}
+ 'sr_path': sr_path,
+ 'seq_num': seq_num}
try:
_params = {'params': pickle.dumps(params)}
@@ -648,29 +652,49 @@ class VMOps(object):
instance=instance)
db.instance_update(context, instance['uuid'], {'progress': progress})
- def migrate_disk_and_power_off(self, context, instance, dest,
- instance_type):
- """Copies a VHD from one host machine to another, possibly
- resizing filesystem before hand.
+ def _migrate_disk_resizing_down(self, context, instance, dest,
+ instance_type, vm_ref, sr_path):
+ # 1. NOOP since we're not transmitting the base-copy separately
+ self._update_instance_progress(context, instance,
+ step=1,
+ total_steps=RESIZE_TOTAL_STEPS)
- :param instance: the instance that owns the VHD in question.
- :param dest: the destination host machine.
- :param disk_type: values are 'primary' or 'cow'.
+ old_gb = instance['root_gb']
+ new_gb = instance_type['root_gb']
+ LOG.debug(_("Resizing down VDI %(cow_uuid)s from "
+ "%(old_gb)dGB to %(new_gb)dGB"), locals(),
+ instance=instance)
- """
- # 0. Zero out the progress to begin
+ # 2. Power down the instance before resizing
+ vm_utils.shutdown_vm(
+ self._session, instance, vm_ref, hard=False)
self._update_instance_progress(context, instance,
- step=0,
+ step=2,
total_steps=RESIZE_TOTAL_STEPS)
- vm_ref = self._get_vm_opaque_ref(instance)
+ # 3. Copy VDI, resize partition and filesystem, forget VDI,
+ # truncate VHD
+ vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
+ self._session, vm_ref)
+ new_ref, new_uuid = vm_utils.resize_disk(self._session,
+ instance,
+ vdi_ref,
+ instance_type)
+ self._update_instance_progress(context, instance,
+ step=3,
+ total_steps=RESIZE_TOTAL_STEPS)
- # The primary VDI becomes the COW after the snapshot, and we can
- # identify it via the VBD. The base copy is the parent_uuid returned
- # from the snapshot creation
+ # 4. Transfer the new VHD
+ self._migrate_vhd(instance, new_uuid, dest, sr_path, 0)
+ self._update_instance_progress(context, instance,
+ step=4,
+ total_steps=RESIZE_TOTAL_STEPS)
- base_copy_uuid = cow_uuid = None
+ # Clean up VDI now that it's been copied
+ vm_utils.destroy_vdi(self._session, new_ref)
+ def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
+ sr_path):
# 1. Create Snapshot
label = "%s-snapshot" % instance.name
with vm_utils.snapshot_attached_here(
@@ -679,124 +703,71 @@ class VMOps(object):
step=1,
total_steps=RESIZE_TOTAL_STEPS)
- # FIXME(sirp): this needs to work with VDI chain of arbitrary
- # length
- base_copy_uuid = vdi_uuids[1]
- _vdi_info = vm_utils.get_vdi_for_vm_safely(self._session, vm_ref)
- vdi_ref, vm_vdi_rec = _vdi_info
- cow_uuid = vm_vdi_rec['uuid']
-
- sr_path = vm_utils.get_sr_path(self._session)
-
- if (instance['auto_disk_config'] and
- instance['root_gb'] > instance_type['root_gb']):
- # Resizing disk storage down
- old_gb = instance['root_gb']
- new_gb = instance_type['root_gb']
-
- LOG.debug(_("Resizing down VDI %(cow_uuid)s from "
- "%(old_gb)dGB to %(new_gb)dGB"), locals(),
- instance=instance)
-
- # 2. Power down the instance before resizing
- vm_utils.shutdown_vm(
- self._session, instance, vm_ref, hard=False)
+ # 2. Transfer the immutable VHDs (base-copies)
+ #
+ # The first VHD will be the leaf (aka COW) that is being used by
+ # the VM. For this step, we're only interested in the immutable
+ # VHDs which are all of the parents of the leaf VHD.
+ for seq_num, vdi_uuid in itertools.islice(
+ enumerate(vdi_uuids), 1, None):
+ self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
- # 3. Copy VDI, resize partition and filesystem, forget VDI,
- # truncate VHD
- new_ref, new_uuid = vm_utils.resize_disk(self._session,
- instance,
- vdi_ref,
- instance_type)
- self._update_instance_progress(context, instance,
- step=3,
- total_steps=RESIZE_TOTAL_STEPS)
-
- # 4. Transfer the new VHD
- self._migrate_vhd(instance, new_uuid, dest, sr_path)
- self._update_instance_progress(context, instance,
- step=4,
- total_steps=RESIZE_TOTAL_STEPS)
-
- # Clean up VDI now that it's been copied
- vm_utils.destroy_vdi(self._session, new_ref)
-
- vdis = {'base_copy': new_uuid}
- else:
- # Resizing disk storage up, will be handled on destination
-
- # As an optimization, we transfer the base VDI first,
- # then shut down the VM, followed by transfering the COW
- # VDI.
-
- # 2. Transfer the base copy
- self._migrate_vhd(instance, base_copy_uuid, dest, sr_path)
- self._update_instance_progress(context, instance,
- step=2,
- total_steps=RESIZE_TOTAL_STEPS)
-
- # 3. Now power down the instance
- vm_utils.shutdown_vm(
- self._session, instance, vm_ref, hard=False)
- self._update_instance_progress(context, instance,
- step=3,
- total_steps=RESIZE_TOTAL_STEPS)
-
- # 4. Transfer the COW VHD
- self._migrate_vhd(instance, cow_uuid, dest, sr_path)
- self._update_instance_progress(context, instance,
- step=4,
- total_steps=RESIZE_TOTAL_STEPS)
-
- # TODO(mdietz): we could also consider renaming these to
- # something sensible so we don't need to blindly pass
- # around dictionaries
- vdis = {'base_copy': base_copy_uuid, 'cow': cow_uuid}
-
- # NOTE(sirp): in case we're resizing to the same host (for dev
- # purposes), apply a suffix to name-label so the two VM records
- # extant until a confirm_resize don't collide.
- name_label = self._get_orig_vm_name_label(instance)
- vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
+ # 3. Now power down the instance
+ vm_utils.shutdown_vm(
+ self._session, instance, vm_ref, hard=False)
+ self._update_instance_progress(context, instance,
+ step=3,
+ total_steps=RESIZE_TOTAL_STEPS)
- return vdis
+ # 4. Transfer the COW VHD
+ vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
+ self._session, vm_ref)
+ cow_uuid = vm_vdi_rec['uuid']
+ self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
+ self._update_instance_progress(context, instance,
+ step=4,
+ total_steps=RESIZE_TOTAL_STEPS)
- def _move_disks(self, instance, disk_info):
- """Move and possibly link VHDs via the XAPI plugin."""
- base_copy_uuid = disk_info['base_copy']
- new_base_copy_uuid = str(uuid.uuid4())
+ def migrate_disk_and_power_off(self, context, instance, dest,
+ instance_type):
+ """Copies a VHD from one host machine to another, possibly
+ resizing filesystem before hand.
- params = {'instance_uuid': instance['uuid'],
- 'sr_path': vm_utils.get_sr_path(self._session),
- 'old_base_copy_uuid': base_copy_uuid,
- 'new_base_copy_uuid': new_base_copy_uuid}
+ :param instance: the instance that owns the VHD in question.
+ :param dest: the destination host machine.
+ :param instance_type: instance_type to resize to
+ """
+ vm_ref = self._get_vm_opaque_ref(instance)
+ sr_path = vm_utils.get_sr_path(self._session)
+ resize_down = (instance['auto_disk_config'] and
+ instance['root_gb'] > instance_type['root_gb'])
- if 'cow' in disk_info:
- cow_uuid = disk_info['cow']
- new_cow_uuid = str(uuid.uuid4())
- params['old_cow_uuid'] = cow_uuid
- params['new_cow_uuid'] = new_cow_uuid
+ # 0. Zero out the progress to begin
+ self._update_instance_progress(context, instance,
+ step=0,
+ total_steps=RESIZE_TOTAL_STEPS)
- new_uuid = new_cow_uuid
+ if resize_down:
+ self._migrate_disk_resizing_down(
+ context, instance, dest, instance_type, vm_ref, sr_path)
else:
- new_uuid = new_base_copy_uuid
-
- self._session.call_plugin('migration', 'move_vhds_into_sr',
- {'params': pickle.dumps(params)})
+ self._migrate_disk_resizing_up(
+ context, instance, dest, vm_ref, sr_path)
- # Now we rescan the SR so we find the VHDs
- vm_utils.scan_default_sr(self._session)
-
- # Set name-label so we can find if we need to clean up a failed
- # migration
- vm_utils.set_vdi_name(self._session, new_uuid, instance.name, 'root')
-
- new_ref = self._session.call_xenapi('VDI.get_by_uuid', new_uuid)
+ # NOTE(sirp): in case we're resizing to the same host (for dev
+ # purposes), apply a suffix to name-label so the two VM records
+ # extant until a confirm_resize don't collide.
+ name_label = self._get_orig_vm_name_label(instance)
+ vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
- return {'uuid': new_uuid, 'ref': new_ref}
+ # NOTE(sirp): disk_info isn't used by the xenapi driver, instead it
+ # uses a staging-area (/images/instance<uuid>) and sequence-numbered
+ # VHDs to figure out how to reconstruct the VDI chain after syncing
+ disk_info = {}
+ return disk_info
def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""