author     Renuka Apte <renuka.apte@citrix.com>    2012-06-04 13:48:34 -0700
committer  Gerrit Code Review <review@openstack.org>    2012-07-09 17:59:36 +0000
commit     ed118dfd6ebe06a095620e93f4318c4e7ac9cfed (patch)
tree       e00d5cd00b2cf842cc0341860e51823e22ea0367 /nova
parent     a97de51e017c9c07eaa3e4a9ddde4193e9528373 (diff)
Boot from volume for Xen
Implements bp xenapi-boot-from-volume

Ensure boot from volume works for XenAPI (tested using XenServer).

1. Add strip_prefix to block_device to make mountpoint comparisons prefix-agnostic.
2. Pass block device and delete-on-termination information to the driver layer.
3. Add the ability to recognize and boot from the specified nova volume.

Change-Id: If117087086eab809217d2b173f921bf9319a52c7
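As a quick illustration of item 1, here is a minimal sketch of what the new strip_prefix helper does (the regexes mirror those added to nova/block_device.py below; the sample device names are illustrative):

    import re

    _dev = re.compile('^/dev/')
    _pref = re.compile('^((x?v|s)d)')

    def strip_prefix(device_name):
        # '/dev/sda', 'sda', 'xvda' and 'vda' all reduce to 'a', so a
        # mapping recorded as '/dev/sda' still matches the device a Xen
        # guest sees as 'xvda'.
        return _pref.sub('', _dev.sub('', device_name))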
Diffstat (limited to 'nova')
-rw-r--r--  nova/block_device.py              9
-rw-r--r--  nova/compute/manager.py          14
-rw-r--r--  nova/tests/test_block_device.py   6
-rw-r--r--  nova/virt/xenapi/connection.py    5
-rw-r--r--  nova/virt/xenapi/vm_utils.py     80
-rw-r--r--  nova/virt/xenapi/vmops.py        62
6 files changed, 141 insertions, 35 deletions
diff --git a/nova/block_device.py b/nova/block_device.py
index 1b7d38145..caa521e83 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -72,3 +72,12 @@ _dev = re.compile('^/dev/')
def strip_dev(device_name):
"""remove leading '/dev/'"""
return _dev.sub('', device_name)
+
+
+_pref = re.compile('^((x?v|s)d)')
+
+
+def strip_prefix(device_name):
+    """Remove both the leading /dev/ and the xvd, sd, or vd prefix."""
+    device_name = strip_dev(device_name)
+    return _pref.sub('', device_name)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index aacfdf7a7..7720b3190 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -430,9 +430,10 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.block_device_mapping_update(
context, bdm['id'],
{'connection_info': jsonutils.dumps(cinfo)})
- block_device_mapping.append({'connection_info': cinfo,
- 'mount_device':
- bdm['device_name']})
+ bdmap = {'connection_info': cinfo,
+ 'mount_device': bdm['device_name'],
+ 'delete_on_termination': bdm['delete_on_termination']}
+ block_device_mapping.append(bdmap)
return {
'root_device_name': instance['root_device_name'],
@@ -685,9 +686,10 @@ class ComputeManager(manager.SchedulerDependentManager):
for bdm in bdms:
try:
cinfo = jsonutils.loads(bdm['connection_info'])
- block_device_mapping.append({'connection_info': cinfo,
- 'mount_device':
- bdm['device_name']})
+ bdmap = {'connection_info': cinfo,
+ 'mount_device': bdm['device_name'],
+ 'delete_on_termination': bdm['delete_on_termination']}
+ block_device_mapping.append(bdmap)
except TypeError:
# if the block_device_mapping has no value in connection_info
# (returned as None), don't include in the mapping
diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py
index b8e9b35e2..6a77d98ae 100644
--- a/nova/tests/test_block_device.py
+++ b/nova/tests/test_block_device.py
@@ -85,3 +85,9 @@ class BlockDeviceTestCase(test.TestCase):
def test_strip_dev(self):
self.assertEqual(block_device.strip_dev('/dev/sda'), 'sda')
self.assertEqual(block_device.strip_dev('sda'), 'sda')
+
+ def test_strip_prefix(self):
+ self.assertEqual(block_device.strip_prefix('/dev/sda'), 'a')
+ self.assertEqual(block_device.strip_prefix('a'), 'a')
+ self.assertEqual(block_device.strip_prefix('xvda'), 'a')
+ self.assertEqual(block_device.strip_prefix('vda'), 'a')
diff --git a/nova/virt/xenapi/connection.py b/nova/virt/xenapi/connection.py
index 6db5e5b73..41c54127e 100644
--- a/nova/virt/xenapi/connection.py
+++ b/nova/virt/xenapi/connection.py
@@ -176,7 +176,8 @@ class XenAPIDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta,
network_info=None, block_device_info=None):
"""Create VM instance"""
- self._vmops.spawn(context, instance, image_meta, network_info)
+ self._vmops.spawn(context, instance, image_meta, network_info,
+ block_device_info)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM"""
@@ -214,7 +215,7 @@ class XenAPIDriver(driver.ComputeDriver):
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy VM instance"""
- self._vmops.destroy(instance, network_info)
+ self._vmops.destroy(instance, network_info, block_device_info)
def pause(self, instance):
"""Pause VM instance"""
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 241b0da68..8a5157542 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -34,6 +34,7 @@ from xml.parsers import expat
from eventlet import greenthread
+from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova import db
@@ -46,6 +47,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk import api as disk
+from nova.virt import driver
from nova.virt import xenapi
from nova.virt.xenapi import volume_utils
@@ -344,6 +346,55 @@ def create_vdi(session, sr_ref, info, disk_type, virtual_size,
return vdi_ref
+def get_vdis_for_boot_from_vol(session, instance, dev_params):
+ vdis = {}
+ sr_uuid = dev_params['sr_uuid']
+ sr_ref = volume_utils.find_sr_by_uuid(session,
+ sr_uuid)
+ if sr_ref:
+ session.call_xenapi("SR.scan", sr_ref)
+ return {'root': dict(uuid=dev_params['vdi_uuid'],
+ file=None)}
+ return vdis
+
+
+def _volume_in_mapping(mount_device, block_device_info):
+ block_device_list = [block_device.strip_prefix(vol['mount_device'])
+ for vol in
+ driver.block_device_info_get_mapping(
+ block_device_info)]
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ swap_dev = swap['device_name']
+ block_device_list.append(block_device.strip_prefix(swap_dev))
+ block_device_list += [block_device.strip_prefix(ephemeral['device_name'])
+ for ephemeral in
+ driver.block_device_info_get_ephemerals(
+ block_device_info)]
+ LOG.debug(_("block_device_list %s"), block_device_list)
+ return block_device.strip_prefix(mount_device) in block_device_list
+
+
+def get_vdis_for_instance(context, session, instance, image,
+ image_type,
+ block_device_info=None):
+ if block_device_info:
+ LOG.debug(_("block device info: %s"), block_device_info)
+ rootdev = block_device_info['root_device_name']
+ if _volume_in_mapping(rootdev, block_device_info):
+            # Boot from volume: return the VDI described by the root
+            # block device's connection info instead of creating disks
+            # from the image.
+ bdm_root_dev = block_device_info['block_device_mapping'][0]
+ dev_params = bdm_root_dev['connection_info']['data']
+ LOG.debug(dev_params)
+ return get_vdis_for_boot_from_vol(session,
+ instance,
+ dev_params)
+ return create_image(context, session, instance, image,
+ image_type)
+
+
def copy_vdi(session, sr_ref, vdi_to_copy_ref):
"""Copy a VDI and return the new VDIs reference."""
vdi_ref = session.call_xenapi('VDI.copy', vdi_to_copy_ref, sr_ref)
@@ -1036,19 +1087,7 @@ def list_vms(session):
yield vm_ref, vm_rec
-def lookup(session, name_label):
- """Look the instance up and return it if available"""
- vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
- n = len(vm_refs)
- if n == 0:
- return None
- elif n > 1:
- raise exception.InstanceExists(name=name_label)
- else:
- return vm_refs[0]
-
-
-def lookup_vm_vdis(session, vm_ref):
+def lookup_vm_vdis(session, vm_ref, nodestroys=None):
"""Look for the VDIs that are attached to the VM"""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
@@ -1064,10 +1103,23 @@ def lookup_vm_vdis(session, vm_ref):
except session.XenAPI.Failure, exc:
LOG.exception(exc)
else:
- vdi_refs.append(vdi_ref)
+ if not nodestroys or record['uuid'] not in nodestroys:
+ vdi_refs.append(vdi_ref)
return vdi_refs
+def lookup(session, name_label):
+ """Look the instance up and return it if available"""
+ vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
+ n = len(vm_refs)
+ if n == 0:
+ return None
+ elif n > 1:
+ raise exception.InstanceExists(name=name_label)
+ else:
+ return vm_refs[0]
+
+
def preconfigure_instance(session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 5d445e0a6..19c6c0b60 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -161,6 +161,7 @@ class VMOps(object):
self.firewall_driver = fw_class(xenapi_session=self._session)
vif_impl = importutils.import_class(FLAGS.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
+ self.default_root_dev = '/dev/sda'
def list_instances(self):
"""List VM instances."""
@@ -230,12 +231,13 @@ class VMOps(object):
self._session.get_xenapi_host(),
False, False)
- def _create_disks(self, context, instance, image_meta):
+ def _create_disks(self, context, instance, image_meta,
+ block_device_info=None):
disk_image_type = vm_utils.determine_disk_image_type(image_meta)
- vdis = vm_utils.create_image(context, self._session,
- instance, instance.image_ref,
- disk_image_type)
-
+ vdis = vm_utils.get_vdis_for_instance(context, self._session,
+ instance, instance.image_ref,
+ disk_image_type,
+ block_device_info=block_device_info)
# Just get the VDI ref once
for vdi in vdis.itervalues():
vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid',
@@ -247,7 +249,8 @@ class VMOps(object):
return vdis
- def spawn(self, context, instance, image_meta, network_info):
+ def spawn(self, context, instance, image_meta, network_info,
+ block_device_info=None):
step = make_step_decorator(context, instance)
@step
@@ -262,7 +265,8 @@ class VMOps(object):
@step
def create_disks_step(undo_mgr):
- vdis = self._create_disks(context, instance, image_meta)
+ vdis = self._create_disks(context, instance, image_meta,
+ block_device_info)
def undo_create_disks():
self._safe_destroy_vdis([vdi['ref'] for vdi in vdis.values()])
@@ -331,8 +335,17 @@ class VMOps(object):
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
+ @step
+ def bdev_set_default_root(undo_mgr):
+ if block_device_info:
+ LOG.debug(_("Block device information present: %s")
+ % block_device_info, instance=instance)
+ if block_device_info and not block_device_info['root_device_name']:
+ block_device_info['root_device_name'] = self.default_root_dev
+
undo_mgr = utils.UndoManager()
try:
+ bdev_set_default_root(undo_mgr)
vanity_step(undo_mgr)
vdis = create_disks_step(undo_mgr)
@@ -1003,6 +1016,30 @@ class VMOps(object):
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
+ def _destroy_vdis(self, instance, vm_ref, block_device_info=None):
+ """Destroys all VDIs associated with a VM."""
+ instance_uuid = instance['uuid']
+ LOG.debug(_("Destroying VDIs for Instance %(instance_uuid)s")
+ % locals())
+ nodestroy = []
+ if block_device_info:
+ for bdm in block_device_info['block_device_mapping']:
+ LOG.debug(bdm)
+                # Leave mapped volumes alone: if delete_on_termination
+                # is false they must survive, and if it is true they
+                # are destroyed later by cleanup_volumes.
+ nodestroy.append(bdm['connection_info']['data']['vdi_uuid'])
+
+ vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref, nodestroy)
+
+ if not vdi_refs:
+ return
+
+ for vdi_ref in vdi_refs:
+ try:
+ vm_utils.destroy_vdi(self._session, vdi_ref)
+ except volume_utils.StorageError as exc:
+ LOG.error(exc)
+
def _safe_destroy_vdis(self, vdi_refs):
"""Destroys the requested VDIs, logging any StorageError exceptions."""
for vdi_ref in vdi_refs:
@@ -1070,7 +1107,7 @@ class VMOps(object):
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
- def destroy(self, instance, network_info):
+ def destroy(self, instance, network_info, block_device_info=None):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
@@ -1089,10 +1126,11 @@ class VMOps(object):
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
- return self._destroy(instance, vm_ref, network_info)
+ return self._destroy(instance, vm_ref, network_info,
+ block_device_info=block_device_info)
def _destroy(self, instance, vm_ref, network_info=None,
- destroy_kernel_ramdisk=True):
+ destroy_kernel_ramdisk=True, block_device_info=None):
"""Destroys VM instance by performing:
1. A shutdown
@@ -1109,9 +1147,7 @@ class VMOps(object):
self._shutdown(instance, vm_ref)
# Destroy VDIs
- vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
- self._safe_destroy_vdis(vdi_refs)
-
+ self._destroy_vdis(instance, vm_ref, block_device_info)
if destroy_kernel_ramdisk:
self._destroy_kernel_ramdisk(instance, vm_ref)
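
Finally, a rough sketch of how the destroy path now spares volume-backed disks (flow and names follow the _destroy_vdis and lookup_vm_vdis changes above; the setup is illustrative):

    # Every mapped volume's vdi_uuid goes on the nodestroy list,
    # regardless of delete_on_termination: the compute manager's
    # cleanup_volumes owns their fate.
    nodestroy = [bdm['connection_info']['data']['vdi_uuid']
                 for bdm in block_device_info['block_device_mapping']]

    # lookup_vm_vdis filters those UUIDs out, so destroy_vdi only
    # touches the instance's own disks.
    vdi_refs = vm_utils.lookup_vm_vdis(session, vm_ref, nodestroy)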