summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohannes Erdfelt <johannes.erdfelt@rackspace.com>2012-05-30 23:23:14 +0000
committerJohannes Erdfelt <johannes.erdfelt@rackspace.com>2012-05-31 23:16:55 +0000
commitd1d99b3de22853d87548ba2a8fa680012945f960 (patch)
tree1aa67ffdbfa391742beb08ac4b9efc7beb3d6f1d
parent0dd15a784482f92ad5aca631c1dc26012b62e3eb (diff)
XenAPI: Don't hardcode userdevice for VBDs
Clean up and refactor the way VBDs are allocated so that it is clearer which userdevice each VDI is allocated to. Also, use a dict of VDIs instead of a list, since it is nonsensical to have multiple VDIs of any given type; this also simplifies the code somewhat. Change-Id: I46d6215dbd90822970a874af66f22c9a34529a40
-rw-r--r--nova/tests/test_xenapi.py33
-rw-r--r--nova/tests/xenapi/stubs.py15
-rw-r--r--nova/virt/xenapi/fake.py13
-rw-r--r--nova/virt/xenapi/vm_utils.py111
-rw-r--r--nova/virt/xenapi/vmops.py155
5 files changed, 158 insertions, 169 deletions
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 6572f6304..9314c947f 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -876,7 +876,8 @@ class XenAPIMigrateInstance(test.TestCase):
conn = xenapi_conn.get_connection(False)
vdi_ref = xenapi_fake.create_vdi('hurr', 'fake')
vdi_uuid = xenapi_fake.get_record('VDI', vdi_ref)['uuid']
- conn._vmops._resize_instance(instance, vdi_uuid)
+ conn._vmops._resize_instance(instance,
+ {'uuid': vdi_uuid, 'ref': vdi_ref})
self.assertEqual(called['resize'], True)
def test_migrate_disk_and_power_off(self):
@@ -1170,14 +1171,18 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
- instance = db.instance_create(self.context, self.instance_values)
+ ctx = context.RequestContext(self.user_id, self.project_id)
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+
disk_image_type = vm_utils.ImageType.DISK_VHD
- vm_ref = "blah"
- first_vdi_ref = "blah"
- vdis = ["blah"]
+ instance = db.instance_create(self.context, self.instance_values)
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
- self.conn._vmops._attach_disks(
- instance, disk_image_type, vm_ref, first_vdi_ref, vdis)
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+ vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
+
+ self.conn._vmops._attach_disks(instance, disk_image_type, vm_ref, vdis)
self.assertEqual(marker["partition_called"], called)
@@ -1256,14 +1261,18 @@ class XenAPIGenerateLocal(test.TestCase):
fake_create_vbd)
def assertCalled(self, instance):
+ ctx = context.RequestContext(self.user_id, self.project_id)
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+
disk_image_type = vm_utils.ImageType.DISK_VHD
- vm_ref = "blah"
- first_vdi_ref = "blah"
- vdis = ["blah"]
+ vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
+ vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
+
+ vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
+ vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
- self.conn._vmops._attach_disks(instance, disk_image_type,
- vm_ref, first_vdi_ref, vdis)
+ self.conn._vmops._attach_disks(instance, disk_image_type, vm_ref, vdis)
self.assertTrue(self.called)
def test_generate_swap(self):
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 7486c4d74..e36fd40e9 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -39,7 +39,9 @@ def stubout_firewall_driver(stubs, conn):
def stubout_instance_snapshot(stubs):
@classmethod
def fake_fetch_image(cls, context, session, instance, image, type):
- return [dict(vdi_type='root', vdi_uuid=_make_fake_vdi())]
+ return {'root': dict(uuid=_make_fake_vdi(), file=None),
+ 'kernel': dict(uuid=_make_fake_vdi(), file=None),
+ 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
@@ -134,9 +136,8 @@ def stubout_fetch_image_glance_disk(stubs, raise_failure=False):
else:
filename = "unknown"
- return [dict(vdi_type=vm_utils.ImageType.to_string(image_type),
- vdi_uuid=None,
- file=filename)]
+ vdi_type = vm_utils.ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk',
_fake_fetch_image_glance_disk)
@@ -338,8 +339,10 @@ def stub_out_migration_methods(stubs):
return 'vm_ref', dict(image='foo', snap='bar')
def fake_move_disks(self, instance, disk_info):
- vdi_ref = fake.create_vdi('new', 'fake')
- return fake.get_record('VDI', vdi_ref)['uuid']
+ vdi_ref = fake.create_vdi(instance['name'], 'fake')
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ vdi_rec['other_config']['nova_disk_type'] = 'root'
+ return {'uuid': vdi_rec['uuid'], 'ref': vdi_ref}
@classmethod
def fake_get_vdi(cls, session, vm_ref):
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 532c9f70d..3ccd6247d 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -475,19 +475,14 @@ class SessionBase(object):
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
- vdi_ref = create_vdi(name_label, sr_ref, sharable=sharable,
- read_only=read_only)
- return vdi_ref
+ other_config = db_ref['other_config'].copy()
+ return create_vdi(name_label, sr_ref, sharable=sharable,
+ read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
- name_label = db_ref['name_label']
- read_only = db_ref['read_only']
sr_ref = db_ref['SR']
- sharable = db_ref['sharable']
- vdi_ref = create_vdi(name_label, sr_ref, sharable=sharable,
- read_only=read_only)
- return vdi_ref
+ return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 508fd34e9..e47002fa8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -320,20 +320,21 @@ class VMHelper(xenapi.HelperBase):
_('Unable to destroy VDI %s') % vdi_ref)
@classmethod
- def create_vdi(cls, session, sr_ref, instance, name_description,
+ def create_vdi(cls, session, sr_ref, instance, disk_type,
virtual_size, read_only=False):
"""Create a VDI record and returns its reference."""
name_label = instance['name']
vdi_ref = session.call_xenapi("VDI.create",
{'name_label': name_label,
- 'name_description': name_description,
+ 'name_description': disk_type,
'SR': sr_ref,
'virtual_size': str(virtual_size),
'type': 'User',
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
- 'other_config': {'nova_instance_uuid': instance['uuid']},
+ 'other_config': {'nova_instance_uuid': instance['uuid'],
+ 'nova_disk_type': disk_type},
'sm_config': {},
'tags': []})
LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
@@ -599,16 +600,15 @@ class VMHelper(xenapi.HelperBase):
return cls.fetch_image(context, session, instance, image,
image_type)
else:
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=None,
- file=filename)]
+ vdi_type = ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
@classmethod
def _create_cached_image(cls, context, session, instance, image,
image_type):
sr_ref = cls.safe_find_sr(session)
sr_type = session.call_xenapi('SR.get_record', sr_ref)["type"]
- vdi_return_list = []
+ vdis = {}
if FLAGS.use_cow_images and sr_type != "ext":
LOG.warning(_("Fast cloning is only supported on default local SR "
@@ -616,42 +616,48 @@ class VMHelper(xenapi.HelperBase):
"type %(sr_type)s. Ignoring the cow flag.")
% locals())
- vdi_ref = cls.find_cached_image(session, image, sr_ref)
- if vdi_ref is None:
- vdis = cls.fetch_image(context, session, instance, image,
- image_type)
- vdi_ref = session.call_xenapi('VDI.get_by_uuid',
- vdis[0]['vdi_uuid'])
- cls.set_vdi_name(session, vdis[0]['vdi_uuid'],
+ root_vdi_ref = cls.find_cached_image(session, image, sr_ref)
+ if root_vdi_ref is None:
+ fetched_vdis = cls.fetch_image(context, session, instance, image,
+ image_type)
+ root_vdi = fetched_vdis['root']
+ root_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
+ root_vdi['uuid'])
+ cls.set_vdi_name(session, root_vdi['uuid'],
'Glance Image %s' % image, 'root',
- vdi_ref=vdi_ref)
+ vdi_ref=root_vdi_ref)
session.call_xenapi('VDI.add_to_other_config',
- vdi_ref, 'image-id', str(image))
+ root_vdi_ref, 'image-id', str(image))
+
+ for vdi_type, vdi in fetched_vdis.iteritems():
+ vdi_ref = session.call_xenapi('VDI.get_by_uuid',
+ vdi['uuid'])
+ session.call_xenapi('VDI.add_to_other_config',
+ vdi_ref, 'nova_disk_type',
+ vdi_type)
- for vdi in vdis:
- if vdi["vdi_type"] == "swap":
+ if vdi_type == 'swap':
session.call_xenapi('VDI.add_to_other_config',
- vdi_ref, "swap-disk",
- str(vdi['vdi_uuid']))
+ root_vdi_ref, 'swap-disk',
+ str(vdi['uuid']))
if FLAGS.use_cow_images and sr_type == 'ext':
- new_vdi_ref = cls.clone_vdi(session, vdi_ref)
+ new_vdi_ref = cls.clone_vdi(session, root_vdi_ref)
else:
- new_vdi_ref = cls.copy_vdi(session, sr_ref, vdi_ref)
+ new_vdi_ref = cls.copy_vdi(session, sr_ref, root_vdi_ref)
# Set the name label for the image we just created and remove image id
# field from other-config.
session.call_xenapi('VDI.remove_from_other_config',
new_vdi_ref, 'image-id')
- vdi_return_list.append(dict(
- vdi_type=("root" if image_type == ImageType.DISK_VHD
- else ImageType.to_string(image_type)),
- vdi_uuid=session.call_xenapi('VDI.get_uuid', new_vdi_ref),
- file=None))
+ vdi_type = ("root" if image_type == ImageType.DISK_VHD
+ else ImageType.to_string(image_type))
+ vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref)
+ vdis[vdi_type] = dict(uuid=vdi_uuid, file=None)
# Create a swap disk if the glance image had one associated with it.
- vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
+ vdi_rec = session.call_xenapi('VDI.get_record', root_vdi_ref)
if 'swap-disk' in vdi_rec['other_config']:
swap_disk_uuid = vdi_rec['other_config']['swap-disk']
swap_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
@@ -659,11 +665,9 @@ class VMHelper(xenapi.HelperBase):
new_swap_vdi_ref = cls.copy_vdi(session, sr_ref, swap_vdi_ref)
new_swap_vdi_uuid = session.call_xenapi('VDI.get_uuid',
new_swap_vdi_ref)
- vdi_return_list.append(dict(vdi_type="swap",
- vdi_uuid=new_swap_vdi_uuid,
- file=None))
+ vdis['swap'] = dict(uuid=new_swap_vdi_uuid, file=None)
- return vdi_return_list
+ return vdis
@classmethod
def create_image(cls, context, session, instance, image, image_type):
@@ -673,22 +677,20 @@ class VMHelper(xenapi.HelperBase):
Returns: A list of dictionaries that describe VDIs
"""
if FLAGS.cache_images is True and image_type != ImageType.DISK_ISO:
- vdi_return_list = cls._create_cached_image(context, session,
- instance, image,
- image_type)
+ vdis = cls._create_cached_image(context, session, instance,
+ image, image_type)
else:
# If caching is disabled, we do not have to keep a copy of the
# image. Fetch the image from glance.
- vdi_return_list = cls.fetch_image(context, session, instance,
- instance.image_ref, image_type)
+ vdis = cls.fetch_image(context, session, instance,
+ instance.image_ref, image_type)
# Set the name label and description to easily identify what
# instance and disk it's for
- for vdi in vdi_return_list:
- cls.set_vdi_name(session, vdi['vdi_uuid'], instance.name,
- vdi['vdi_type'])
+ for vdi_type, vdi in vdis.iteritems():
+ cls.set_vdi_name(session, vdi['uuid'], instance.name, vdi_type)
- return vdi_return_list
+ return vdis
@classmethod
def fetch_image(cls, context, session, instance, image, image_type):
@@ -754,26 +756,29 @@ class VMHelper(xenapi.HelperBase):
instance=instance)
sr_ref = cls.safe_find_sr(session)
- vdis = cls._retry_glance_download_vhd(context, session, image)
+ fetched_vdis = cls._retry_glance_download_vhd(context, session, image)
# 'download_vhd' will return a list of dictionaries describing VDIs.
# The dictionary will contain 'vdi_type' and 'vdi_uuid' keys.
# 'vdi_type' can be 'root' or 'swap' right now.
- for vdi in vdis:
+ for vdi in fetched_vdis:
LOG.debug(_("xapi 'download_vhd' returned VDI of "
"type '%(vdi_type)s' with UUID '%(vdi_uuid)s'"),
vdi, instance=instance)
cls.scan_sr(session, sr_ref)
- # Pull out the UUID of the first VDI (which is the os VDI)
- os_vdi_uuid = vdis[0]['vdi_uuid']
+ vdis = {}
+ for vdi in fetched_vdis:
+ vdis[vdi['vdi_type']] = dict(uuid=vdi['vdi_uuid'], file=None)
+
+ # Pull out the UUID of the root VDI
+ root_vdi_uuid = vdis['root']['uuid']
# Set the name-label to ease debugging
- cls.set_vdi_name(session, os_vdi_uuid, instance.name,
- vdis[0]['vdi_type'])
+ cls.set_vdi_name(session, root_vdi_uuid, instance.name, 'root')
- cls._check_vdi_size(context, session, instance, os_vdi_uuid)
+ cls._check_vdi_size(context, session, instance, root_vdi_uuid)
return vdis
@classmethod
@@ -885,13 +890,11 @@ class VMHelper(xenapi.HelperBase):
cls.destroy_vdi(session, vdi_ref)
LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref,
instance=instance)
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=None,
- file=filename)]
+ vdi_type = ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
else:
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=vdi_uuid,
- file=None)]
+ vdi_type = ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=vdi_uuid, file=None)}
except (session.XenAPI.Failure, IOError, OSError) as e:
# We look for XenAPI and OS failures.
LOG.exception(_("Failed to fetch glance image"),
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c2f2d3b4a..253540260 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -80,6 +80,12 @@ flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')
RESIZE_TOTAL_STEPS = 5
+DEVICE_ROOT = '0'
+DEVICE_RESCUE = '1'
+DEVICE_SWAP = '2'
+DEVICE_EPHEMERAL = '3'
+DEVICE_CD = '4'
+
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
@@ -207,13 +213,13 @@ class VMOps(object):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance):
- vdi_uuid = self._move_disks(instance, disk_info)
+ root_vdi = self._move_disks(instance, disk_info)
if resize_instance:
- self._resize_instance(instance, vdi_uuid)
+ self._resize_instance(instance, root_vdi)
vm_ref = self._create_vm(context, instance,
- [dict(vdi_type='root', vdi_uuid=vdi_uuid)],
+ {'root': root_vdi},
network_info, image_meta)
# 5. Start VM
@@ -236,9 +242,14 @@ class VMOps(object):
instance, instance.image_ref,
disk_image_type)
- for vdi in vdis:
- if vdi["vdi_type"] == "root":
- self._resize_instance(instance, vdi["vdi_uuid"])
+ # Just get the VDI ref once
+ for vdi in vdis.itervalues():
+ vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
+ vdi['uuid'])
+
+ root_vdi = vdis.get('root')
+ if root_vdi:
+ self._resize_instance(instance, root_vdi)
return vdis
@@ -260,17 +271,7 @@ class VMOps(object):
vdis = self._create_disks(context, instance, image_meta)
def undo_create_disks():
- vdi_refs = []
- for vdi in vdis:
- try:
- vdi_ref = self._session.call_xenapi(
- 'VDI.get_by_uuid', vdi['vdi_uuid'])
- except self.XenAPI.Failure:
- continue
-
- vdi_refs.append(vdi_ref)
-
- self._safe_destroy_vdis(vdi_refs)
+ self._safe_destroy_vdis([vdi['ref'] for vdi in vdis.values()])
undo_mgr.undo_with(undo_create_disks)
return vdis
@@ -281,16 +282,16 @@ class VMOps(object):
ramdisk_file = None
if instance.kernel_id:
- kernel = VMHelper.create_kernel_image(context, self._session,
+ vdis = VMHelper.create_kernel_image(context, self._session,
instance, instance.kernel_id, instance.user_id,
- instance.project_id, vm_utils.ImageType.KERNEL)[0]
- kernel_file = kernel.get('file')
+ instance.project_id, vm_utils.ImageType.KERNEL)
+ kernel_file = vdis['kernel'].get('file')
if instance.ramdisk_id:
- ramdisk = VMHelper.create_kernel_image(context, self._session,
+ vdis = VMHelper.create_kernel_image(context, self._session,
instance, instance.ramdisk_id, instance.user_id,
- instance.project_id, vm_utils.ImageType.RAMDISK)[0]
- ramdisk_file = ramdisk.get('file')
+ instance.project_id, vm_utils.ImageType.RAMDISK)
+ ramdisk_file = vdis['ramdisk'].get('file')
def undo_create_kernel_ramdisk():
if kernel_file or ramdisk_file:
@@ -374,16 +375,6 @@ class VMOps(object):
disk_image_type = VMHelper.determine_disk_image_type(image_meta)
- # NOTE(jk0): Since vdi_type may contain either 'root' or 'swap', we
- # need to ensure that the 'swap' VDI is not chosen as the mount
- # point for file injection.
- first_vdi_ref = None
- for vdi in vdis:
- if vdi.get('vdi_type') != 'swap':
- # Create the VM ref and attach the first disk
- first_vdi_ref = self._session.call_xenapi(
- 'VDI.get_by_uuid', vdi['vdi_uuid'])
-
vm_mode = instance.vm_mode and instance.vm_mode.lower()
if vm_mode == 'pv':
use_pv_kernel = True
@@ -392,7 +383,7 @@ class VMOps(object):
vm_mode = 'hvm' # Normalize
else:
use_pv_kernel = VMHelper.determine_is_pv(self._session,
- first_vdi_ref, disk_image_type, instance.os_type)
+ vdis['root']['ref'], disk_image_type, instance.os_type)
vm_mode = use_pv_kernel and 'pv' or 'hvm'
if instance.vm_mode != vm_mode:
@@ -405,13 +396,12 @@ class VMOps(object):
use_pv_kernel)
# Add disks to VM
- self._attach_disks(instance, disk_image_type, vm_ref, first_vdi_ref,
- vdis)
+ self._attach_disks(instance, disk_image_type, vm_ref, vdis)
# Alter the image before VM start for network injection.
if FLAGS.flat_injected:
VMHelper.preconfigure_instance(self._session, instance,
- first_vdi_ref, network_info)
+ vdis['root']['ref'], network_info)
self._create_vifs(vm_ref, instance, network_info)
self.inject_network_info(instance, network_info, vm_ref)
@@ -421,76 +411,62 @@ class VMOps(object):
return vm_ref
- def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref,
- vdis):
+ def _attach_disks(self, instance, disk_image_type, vm_ref, vdis):
ctx = nova_context.get_admin_context()
- # device 0 reserved for RW disk
- userdevice = 0
-
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
LOG.debug(_("Detected ISO image type, creating blank VM "
"for install"), instance=instance)
- cd_vdi_ref = first_vdi_ref
- first_vdi_ref = VMHelper.fetch_blank_disk(self._session,
- instance.instance_type_id)
+ cd_vdi = vdis.pop('root')
+ root_vdi = VMHelper.fetch_blank_disk(self._session,
+ instance.instance_type_id)
+ vdis['root'] = root_vdi
- VMHelper.create_vbd(self._session, vm_ref, first_vdi_ref,
- userdevice, bootable=False)
+ VMHelper.create_vbd(self._session, vm_ref, root_vdi['ref'],
+ DEVICE_ROOT, bootable=False)
- # device 1 reserved for rescue disk and we've used '0'
- userdevice = 2
- VMHelper.create_vbd(self._session, vm_ref, cd_vdi_ref,
- userdevice, vbd_type='CD', bootable=True)
-
- # set user device to next free value
- userdevice += 1
+ VMHelper.create_vbd(self._session, vm_ref, cd_vdi['ref'],
+ DEVICE_CD, vbd_type='CD', bootable=True)
else:
+ root_vdi = vdis['root']
+
if instance.auto_disk_config:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
instance_type = db.instance_type_get(ctx,
instance.instance_type_id)
VMHelper.auto_configure_disk(self._session,
- first_vdi_ref,
+ root_vdi['ref'],
instance_type['root_gb'])
- VMHelper.create_vbd(self._session, vm_ref, first_vdi_ref,
- userdevice, bootable=True)
+ VMHelper.create_vbd(self._session, vm_ref, root_vdi['ref'],
+ DEVICE_ROOT, bootable=True)
- # set user device to next free value
- # userdevice 1 is reserved for rescue and we've used '0'
- userdevice = 2
+ # Attach (optional) swap disk
+ swap_vdi = vdis.get('swap')
instance_type = db.instance_type_get(ctx, instance.instance_type_id)
swap_mb = instance_type['swap']
generate_swap = swap_mb and FLAGS.xenapi_generate_swap
if generate_swap:
- VMHelper.generate_swap(self._session, instance,
- vm_ref, userdevice, swap_mb)
- userdevice += 1
-
+ VMHelper.generate_swap(self._session, instance, vm_ref,
+ DEVICE_SWAP, swap_mb)
+
+ if swap_vdi:
+ # We won't be using packaged swap VDI, so destroy it
+ VMHelper.destroy_vdi(self._session, swap_vdi['ref'])
+ elif swap_vdi:
+ # Attach packaged swap VDI to VM
+ VMHelper.create_vbd(self._session, vm_ref, swap_vdi['ref'],
+ DEVICE_SWAP, bootable=False)
+
+ # Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
- VMHelper.generate_ephemeral(self._session, instance,
- vm_ref, userdevice, ephemeral_gb)
- userdevice += 1
-
- # Attach any other disks
- for vdi in vdis[1:]:
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
- vdi['vdi_uuid'])
-
- if generate_swap and vdi['vdi_type'] == 'swap':
- # We won't be using it, so don't let it leak
- VMHelper.destroy_vdi(self._session, vdi_ref)
- continue
-
- VMHelper.create_vbd(self._session, vm_ref, vdi_ref,
- userdevice, bootable=False)
- userdevice += 1
+ VMHelper.generate_ephemeral(self._session, instance, vm_ref,
+ DEVICE_EPHEMERAL, ephemeral_gb)
def _boot_new_instance(self, instance, vm_ref):
"""Boot a new instance and configure it."""
@@ -834,9 +810,11 @@ class VMOps(object):
# migration
VMHelper.set_vdi_name(self._session, new_uuid, instance.name, 'root')
- return new_uuid
+ new_ref = self._session.call_xenapi('VDI.get_by_uuid', new_uuid)
+
+ return {'uuid': new_uuid, 'ref': new_ref}
- def _resize_instance(self, instance, vdi_uuid):
+ def _resize_instance(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance.root_gb * 1024 * 1024 * 1024
@@ -844,9 +822,8 @@ class VMOps(object):
return
# Get current size of VDI
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
- vdi_ref)
+ root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
@@ -854,13 +831,14 @@ class VMOps(object):
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
+ vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"), locals(), instance=instance)
if self._session.product_version[0] > 5:
resize_func_name = 'VDI.resize'
else:
resize_func_name = 'VDI.resize_online'
- self._session.call_xenapi(resize_func_name, vdi_ref,
+ self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
@@ -1016,7 +994,7 @@ class VMOps(object):
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
- if vbd["userdevice"] == "0":
+ if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
@@ -1193,7 +1171,8 @@ class VMOps(object):
vdi_ref = self._find_root_vdi_ref(vm_ref)
rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
- vdi_ref, 1, bootable=False)
+ vdi_ref, DEVICE_RESCUE,
+ bootable=False)
self._session.call_xenapi('VBD.plug', rescue_vbd_ref)
def unrescue(self, instance):