-rw-r--r--  Authors                                            |   1
-rw-r--r--  nova/api/ec2/cloud.py                              |   1
-rw-r--r--  nova/api/openstack/servers.py                      |  56
-rw-r--r--  nova/compute/api.py                                |   1
-rw-r--r--  nova/compute/instance_types.py                     |   2
-rw-r--r--  nova/tests/glance/stubs.py                         |  40
-rw-r--r--  nova/tests/test_compute.py                         |   7
-rw-r--r--  nova/tests/test_xenapi.py                          |  72
-rw-r--r--  nova/tests/xenapi/stubs.py                         |   6
-rw-r--r--  nova/virt/xenapi/vm_utils.py                       | 216
-rw-r--r--  nova/virt/xenapi/vmops.py                          | 114
-rw-r--r--  nova/virt/xenapi_conn.py                           |   2
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 393
-rwxr-xr-x  run_tests.sh                                       |   2
14 files changed, 718 insertions(+), 195 deletions(-)
@@ -47,6 +47,7 @@ MORITA Kazutaka <morita.kazutaka@gmail.com>
 Muneyuki Noguchi <noguchimn@nttdata.co.jp>
 Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
 Naveed Massjouni <naveed.massjouni@rackspace.com>
+Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
 Paul Voccio <paul@openstack.org>
 Ricardo Carrillo Cruz <emaildericky@gmail.com>
 Rick Clark <rick@openstack.org>
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 7458d307a..844ccbe5e 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -890,7 +890,6 @@ class CloudController(object):
             raise exception.ApiError(_('attribute not supported: %s')
                                      % attribute)
         try:
-            image = self.image_service.show(context, image_id)
             image = self._format_image(context,
                                        self.image_service.show(context,
                                                                image_id))
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 4da5934cb..d5c79a1c0 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -50,7 +50,8 @@ def _translate_detail_keys(inst):
                       power_state.PAUSED: 'paused',
                       power_state.SHUTDOWN: 'active',
                       power_state.SHUTOFF: 'active',
-                      power_state.CRASHED: 'error'}
+                      power_state.CRASHED: 'error',
+                      power_state.FAILED: 'error'}
     inst_dict = {}

     mapped_keys = dict(status='state', imageId='image_id',
@@ -135,25 +136,6 @@ class Controller(wsgi.Controller):
             return faults.Fault(exc.HTTPNotFound())
         return exc.HTTPAccepted()

-    def _get_kernel_ramdisk_from_image(self, req, image_id):
-        """
-        Machine images are associated with Kernels and Ramdisk images via
-        metadata stored in Glance as 'image_properties'
-        """
-        def lookup(param):
-            _image_id = image_id
-            try:
-                return image['properties'][param]
-            except KeyError:
-                LOG.debug(
-                    _("%(param)s property not found for image %(_image_id)s") %
-                    locals())
-                return None
-
-        image_id = str(image_id)
-        image = self._image_service.show(req.environ['nova.context'], image_id)
-        return lookup('kernel_id'), lookup('ramdisk_id')
-
     def create(self, req):
         """ Creates a new server for a given user """
         env = self._deserialize(req.body, req)
@@ -425,3 +407,37 @@ class Controller(wsgi.Controller):
                                    action=item.action,
                                    error=item.error))
         return dict(actions=actions)
+
+    def _get_kernel_ramdisk_from_image(self, req, image_id):
+        """Retrieves kernel and ramdisk IDs from Glance
+
+        Only the 'machine' (ami) image type uses a kernel and ramdisk
+        outside of the image.
+        """
+        # FIXME(sirp): Since we're retrieving the kernel_id from an
+        # image_property, this means only Glance is supported.
+        # The BaseImageService needs to expose a consistent way of accessing
+        # kernel_id and ramdisk_id
+        image = self._image_service.show(req.environ['nova.context'], image_id)
+
+        if image['status'] != 'active':
+            raise exception.Invalid(
+                _("Cannot build from image %(image_id)s, status not active") %
+                locals())
+
+        if image['type'] != 'machine':
+            return None, None
+
+        try:
+            kernel_id = image['properties']['kernel_id']
+        except KeyError:
+            raise exception.NotFound(
+                _("Kernel not found for image %(image_id)s") % locals())
+
+        try:
+            ramdisk_id = image['properties']['ramdisk_id']
+        except KeyError:
+            raise exception.NotFound(
+                _("Ramdisk not found for image %(image_id)s") % locals())
+
+        return kernel_id, ramdisk_id
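
Aside: the relocated helper boils down to three rules: the image must be active, only 'machine' images carry an external kernel/ramdisk, and a missing property is an error. A standalone sketch of just that logic, with a plain dict standing in for the Glance record (the dict shape mirrors the fixtures added in nova/tests/glance/stubs.py; names here are illustrative, not nova's):

    def get_kernel_ramdisk(image):
        if image['status'] != 'active':
            raise ValueError("image not active")
        if image['type'] != 'machine':
            return None, None                  # raw/vhd images: nothing extra
        try:
            return (image['properties']['kernel_id'],
                    image['properties']['ramdisk_id'])
        except KeyError as missing:
            raise LookupError("image property %s not found" % missing)

    assert get_kernel_ramdisk({'status': 'active', 'type': 'vhd'}) == (None, None)
    assert get_kernel_ramdisk({'status': 'active', 'type': 'machine',
                               'properties': {'kernel_id': 2,
                                              'ramdisk_id': 3}}) == (2, 3)
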
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 927dc062d..52d30c7f7 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -129,6 +129,7 @@ class API(base.Base):
             kernel_id = image.get('kernel_id', None)
         if ramdisk_id is None:
             ramdisk_id = image.get('ramdisk_id', None)
+        # FIXME(sirp): is there a way we can remove null_kernel?
         # No kernel and ramdisk for raw images
         if kernel_id == str(FLAGS.null_kernel):
             kernel_id = None
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 309313fd0..7a2a5baa3 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -45,6 +45,6 @@ def get_by_type(instance_type):

 def get_by_flavor_id(flavor_id):
     for instance_type, details in INSTANCE_TYPES.iteritems():
-        if details['flavorid'] == flavor_id:
+        if details['flavorid'] == int(flavor_id):
             return instance_type
     return FLAGS.default_instance_type
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index f182b857a..3ff8d7ce5 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -26,12 +26,40 @@ def stubout_glance_client(stubs, cls):


 class FakeGlance(object):
+    IMAGE_MACHINE = 1
+    IMAGE_KERNEL = 2
+    IMAGE_RAMDISK = 3
+    IMAGE_RAW = 4
+    IMAGE_VHD = 5
+
+    IMAGE_FIXTURES = {
+        IMAGE_MACHINE: {
+            'image_meta': {'name': 'fakemachine', 'size': 0,
+                           'type': 'machine'},
+            'image_data': StringIO.StringIO('')},
+        IMAGE_KERNEL: {
+            'image_meta': {'name': 'fakekernel', 'size': 0,
+                           'type': 'kernel'},
+            'image_data': StringIO.StringIO('')},
+        IMAGE_RAMDISK: {
+            'image_meta': {'name': 'fakeramdisk', 'size': 0,
+                           'type': 'ramdisk'},
+            'image_data': StringIO.StringIO('')},
+        IMAGE_RAW: {
+            'image_meta': {'name': 'fakeraw', 'size': 0,
+                           'type': 'raw'},
+            'image_data': StringIO.StringIO('')},
+        IMAGE_VHD: {
+            'image_meta': {'name': 'fakevhd', 'size': 0,
+                           'type': 'vhd'},
+            'image_data': StringIO.StringIO('')}}
+
     def __init__(self, host, port=None, use_ssl=False):
         pass

-    def get_image(self, image):
-        meta = {
-            'size': 0,
-        }
-        image_file = StringIO.StringIO('')
-        return meta, image_file
+    def get_image_meta(self, image_id):
+        return self.IMAGE_FIXTURES[image_id]['image_meta']
+
+    def get_image(self, image_id):
+        image = self.IMAGE_FIXTURES[image_id]
+        return image['image_meta'], image['image_data']
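
Aside: the one-character instance_types.py fix matters because the OpenStack API layer passes flavor ids through as strings while INSTANCE_TYPES stores integers; the new tests below exercise exactly that. A self-contained sketch of the coercion (the table is a stand-in, not nova's real one):

    INSTANCE_TYPES = {'m1.tiny': {'flavorid': 1},      # stand-in table
                      'm1.small': {'flavorid': 2}}

    def get_by_flavor_id(flavor_id, default='m1.small'):
        for name, details in INSTANCE_TYPES.items():
            if details['flavorid'] == int(flavor_id):  # the fix: coerce first
                return name
        return default

    assert get_by_flavor_id(1) == get_by_flavor_id("1") == 'm1.tiny'
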
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index abd4cee5f..58493d7ac 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -30,6 +30,7 @@ from nova import log as logging
 from nova import test
 from nova import utils
 from nova.auth import manager
+from nova.compute import instance_types


 LOG = logging.getLogger('nova.tests.compute')
@@ -281,6 +282,10 @@ class ComputeTestCase(test.TestCase):
                                    migration_ref['id'])
         self.compute.terminate_instance(context, instance_id)

+    def test_get_by_flavor_id(self):
+        type = instance_types.get_by_flavor_id(1)
+        self.assertEqual(type, 'm1.tiny')
+
     def test_resize_same_source_fails(self):
         """Ensure instance fails to migrate when source and destination are
         the same host"""
@@ -289,3 +294,5 @@ class ComputeTestCase(test.TestCase):
         self.assertRaises(exception.Error, self.compute.prep_resize,
                           self.context, instance_id)
         self.compute.terminate_instance(self.context, instance_id)
+        type = instance_types.get_by_flavor_id("1")
+        self.assertEqual(type, 'm1.tiny')
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index b594538da..9e0e2b0db 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -31,6 +31,7 @@ from nova.compute import power_state
 from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake as xenapi_fake
 from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
 from nova.virt.xenapi.vmops import SimpleDH
 from nova.virt.xenapi.vmops import VMOps
 from nova.tests.db import fakes as db_fakes
@@ -284,11 +285,17 @@ class XenAPIVMTestCase(test.TestCase):

     def test_spawn_raw_glance(self):
         FLAGS.xenapi_image_service = 'glance'
-        self._test_spawn(1, None, None)
+        self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+
+    def test_spawn_vhd_glance(self):
+        FLAGS.xenapi_image_service = 'glance'
+        self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None)

     def test_spawn_glance(self):
         FLAGS.xenapi_image_service = 'glance'
-        self._test_spawn(1, 2, 3)
+        self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+                         glance_stubs.FakeGlance.IMAGE_KERNEL,
+                         glance_stubs.FakeGlance.IMAGE_RAMDISK)

     def tearDown(self):
         super(XenAPIVMTestCase, self).tearDown()
@@ -343,6 +350,7 @@ class XenAPIMigrateInstance(test.TestCase):
     """
     Unit test for verifying migration-related actions
     """
+
     def setUp(self):
         super(XenAPIMigrateInstance, self).setUp()
         self.stubs = stubout.StubOutForTesting()
@@ -374,3 +382,63 @@ class XenAPIMigrateInstance(test.TestCase):
         stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
         conn = xenapi_conn.get_connection(False)
         conn.attach_disk(instance, {'base_copy': 'hurr', 'cow': 'durr'})
+
+
+class XenAPIDetermineDiskImageTestCase(test.TestCase):
+    """
+    Unit tests for code that detects the ImageType
+    """
+    def setUp(self):
+        super(XenAPIDetermineDiskImageTestCase, self).setUp()
+        glance_stubs.stubout_glance_client(self.stubs,
+                                           glance_stubs.FakeGlance)
+
+        class FakeInstance(object):
+            pass
+
+        self.fake_instance = FakeInstance()
+        self.fake_instance.id = 42
+
+    def assert_disk_type(self, disk_type):
+        dt = vm_utils.VMHelper.determine_disk_image_type(
+            self.fake_instance)
+        self.assertEqual(disk_type, dt)
+
+    def test_instance_disk(self):
+        """
+        If a kernel is specified then the image type is DISK (aka machine)
+        """
+        FLAGS.xenapi_image_service = 'objectstore'
+        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+        self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
+        self.assert_disk_type(vm_utils.ImageType.DISK)
+
+    def test_instance_disk_raw(self):
+        """
+        If the kernel isn't specified, and we're not using Glance, then
+        DISK_RAW is assumed.
+        """
+        FLAGS.xenapi_image_service = 'objectstore'
+        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+        self.fake_instance.kernel_id = None
+        self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+    def test_glance_disk_raw(self):
+        """
+        If we're using Glance, then defer to the image_type field, which in
+        this case will be 'raw'.
+        """
+        FLAGS.xenapi_image_service = 'glance'
+        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+        self.fake_instance.kernel_id = None
+        self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+    def test_glance_disk_vhd(self):
+        """
+        If we're using Glance, then defer to the image_type field, which in
+        this case will be 'vhd'.
+        """
+        FLAGS.xenapi_image_service = 'glance'
+        self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+        self.fake_instance.kernel_id = None
+        self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
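
Aside: the XenAPIDetermineDiskImageTestCase cases reduce to a small pure function. A sketch of the decision table they pin down (constants mirror vm_utils.ImageType; the glance_type argument stands in for the image's 'type' metadata):

    DISK, DISK_RAW, DISK_VHD = 1, 2, 3    # mirrors vm_utils.ImageType

    def determine_disk_image_type(using_glance, glance_type=None,
                                  kernel_id=None):
        if using_glance:                  # rule 1: trust the image metadata
            return {'machine': DISK, 'raw': DISK_RAW,
                    'vhd': DISK_VHD}[glance_type]
        return DISK if kernel_id else DISK_RAW   # rule 2: deduce from kernel_id

    assert determine_disk_image_type(True, glance_type='vhd') == DISK_VHD
    assert determine_disk_image_type(False, kernel_id=None) == DISK_RAW
    assert determine_disk_image_type(False, kernel_id=2) == DISK
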
+ """ + FLAGS.xenapi_image_service = 'glance' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_RAW) + + def test_glance_disk_vhd(self): + """ + If we're using Glance, then defer to the image_type field, which in + this case will be 'vhd'. + """ + FLAGS.xenapi_image_service = 'glance' + self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD + self.fake_instance.kernel_id = None + self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index f22e9a6e9..ddb7ad6db 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -178,6 +178,12 @@ class FakeSessionForVMTests(fake.SessionBase): def VM_destroy(self, session_ref, vm_ref): fake.destroy_vm(vm_ref) + def SR_scan(self, session_ref, sr_ref): + pass + + def VDI_set_name_label(self, session_ref, vdi_ref, name_label): + pass + class FakeSessionForVolumeTests(fake.SessionBase): """ Stubs out a XenAPISession for Volume tests """ diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 870660dea..eea9bb0b9 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -24,6 +24,7 @@ import pickle import re import time import urllib +import uuid from xml.dom import minidom from eventlet import event @@ -63,11 +64,14 @@ class ImageType: 0 - kernel/ramdisk image (goes on dom0's filesystem) 1 - disk image (local SR, partitioned by objectstore plugin) 2 - raw disk image (local SR, NOT partitioned by plugin) + 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for + linux, HVM assumed for Windows) """ KERNEL_RAMDISK = 0 DISK = 1 DISK_RAW = 2 + DISK_VHD = 3 class VMHelper(HelperBase): @@ -292,7 +296,9 @@ class VMHelper(HelperBase): session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) #TODO(sirp): we need to assert only one parent, not parents two deep - return template_vm_ref, [template_vdi_uuid, parent_uuid] + template_vdi_uuids = {'image': parent_uuid, + 'snap': template_vdi_uuid} + return template_vm_ref, template_vdi_uuids @classmethod def get_sr(cls, session, sr_label='slices'): @@ -312,22 +318,26 @@ class VMHelper(HelperBase): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. 
""" + # NOTE(sirp): Currently we only support uploading images as VHD, there + # is no RAW equivalent (yet) logging.debug(_("Asking xapi to upload %(vdi_uuids)s as" " ID %(image_id)s") % locals()) params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, 'glance_host': FLAGS.glance_host, - 'glance_port': FLAGS.glance_port} + 'glance_port': FLAGS.glance_port, + 'sr_path': get_sr_path(session)} kwargs = {'params': pickle.dumps(params)} - task = session.async_call_plugin('glance', 'put_vdis', kwargs) + task = session.async_call_plugin('glance', 'upload_vhd', kwargs) session.wait_for_task(instance_id, task) @classmethod - def fetch_image(cls, session, instance_id, image, user, project, type): + def fetch_image(cls, session, instance_id, image, user, project, + image_type): """ - type is interpreted as an ImageType instance + image_type is interpreted as an ImageType instance Related flags: xenapi_image_service = ['glance', 'objectstore'] glance_address = 'address for glance services' @@ -337,35 +347,80 @@ class VMHelper(HelperBase): if FLAGS.xenapi_image_service == 'glance': return cls._fetch_image_glance(session, instance_id, image, - access, type) + access, image_type) else: return cls._fetch_image_objectstore(session, instance_id, image, - access, user.secret, type) + access, user.secret, + image_type) @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, type): - sr = find_sr(session) - if sr is None: - raise exception.NotFound('Cannot find SR to write VDI to') + def _fetch_image_glance_vhd(cls, session, instance_id, image, access, + image_type): + LOG.debug(_("Asking xapi to fetch vhd image %(image)s") + % locals()) + + sr_ref = safe_find_sr(session) + + # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not + # have the `uuid` module. To work around this, we generate the uuids + # here (under Python 2.6+) and pass them as arguments + uuid_stack = [str(uuid.uuid4()) for i in xrange(2)] + + params = {'image_id': image, + 'glance_host': FLAGS.glance_host, + 'glance_port': FLAGS.glance_port, + 'uuid_stack': uuid_stack, + 'sr_path': get_sr_path(session)} + + kwargs = {'params': pickle.dumps(params)} + task = session.async_call_plugin('glance', 'download_vhd', kwargs) + vdi_uuid = session.wait_for_task(instance_id, task) + + scan_sr(session, instance_id, sr_ref) + + # Set the name-label to ease debugging + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + name_label = get_name_label_for_image(image) + session.get_xenapi().VDI.set_name_label(vdi_ref, name_label) + + LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s") + % locals()) + return vdi_uuid + + @classmethod + def _fetch_image_glance_disk(cls, session, instance_id, image, access, + image_type): + """Fetch the image from Glance - c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + NOTE: + Unlike _fetch_image_glance_vhd, this method does not use the Glance + plugin; instead, it streams the disks through domU to the VDI + directly. 
@@ -384,20 +439,87 @@ class VMHelper(HelperBase):
         return session.get_xenapi().VDI.get_uuid(vdi)

     @classmethod
+    def determine_disk_image_type(cls, instance):
+        """Disk Image Types are used to determine where the kernel will
+        reside within an image. To figure out which type we're dealing
+        with, we use the following rules:
+
+        1. If we're using Glance, we can use the image type field to
+           determine the disk image type
+
+        2. If we're not using Glance, then we need to deduce this based on
+           whether a kernel_id is specified.
+        """
+        def log_disk_format(image_type):
+            pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+                             ImageType.DISK: 'DISK',
+                             ImageType.DISK_RAW: 'DISK_RAW',
+                             ImageType.DISK_VHD: 'DISK_VHD'}
+            disk_format = pretty_format[image_type]
+            image_id = instance.image_id
+            instance_id = instance.id
+            LOG.debug(_("Detected %(disk_format)s format for image "
+                        "%(image_id)s, instance %(instance_id)s") % locals())
+
+        def determine_from_glance():
+            glance_type2nova_type = {'machine': ImageType.DISK,
+                                     'raw': ImageType.DISK_RAW,
+                                     'vhd': ImageType.DISK_VHD,
+                                     'kernel': ImageType.KERNEL_RAMDISK,
+                                     'ramdisk': ImageType.KERNEL_RAMDISK}
+            client = glance.client.Client(FLAGS.glance_host,
+                                          FLAGS.glance_port)
+            meta = client.get_image_meta(instance.image_id)
+            type_ = meta['type']
+            try:
+                return glance_type2nova_type[type_]
+            except KeyError:
+                raise exception.NotFound(
+                    _("Unrecognized image type '%(type_)s'") % locals())
+
+        def determine_from_instance():
+            if instance.kernel_id:
+                return ImageType.DISK
+            else:
+                return ImageType.DISK_RAW
+
+        # FIXME(sirp): can we unify the ImageService and
+        # xenapi_image_service abstractions?
+        if FLAGS.xenapi_image_service == 'glance':
+            image_type = determine_from_glance()
+        else:
+            image_type = determine_from_instance()
+
+        log_disk_format(image_type)
+        return image_type
+
+    @classmethod
+    def _fetch_image_glance(cls, session, instance_id, image, access,
+                            image_type):
+        if image_type == ImageType.DISK_VHD:
+            return cls._fetch_image_glance_vhd(
+                session, instance_id, image, access, image_type)
+        else:
+            return cls._fetch_image_glance_disk(
+                session, instance_id, image, access, image_type)
+
+    @classmethod
     def _fetch_image_objectstore(cls, session, instance_id, image, access,
-                                 secret, type):
+                                 secret, image_type):
         url = images.image_url(image)
         LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
-        fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+        if image_type == ImageType.KERNEL_RAMDISK:
+            fn = 'get_kernel'
+        else:
+            fn = 'get_vdi'
         args = {}
         args['src_url'] = url
         args['username'] = access
         args['password'] = secret
         args['add_partition'] = 'false'
         args['raw'] = 'false'
-        if type != ImageType.KERNEL_RAMDISK:
+        if image_type != ImageType.KERNEL_RAMDISK:
             args['add_partition'] = 'true'
-        if type == ImageType.DISK_RAW:
+        if image_type == ImageType.DISK_RAW:
             args['raw'] = 'true'
         task = session.async_call_plugin('objectstore', fn, args)
         uuid = session.wait_for_task(instance_id, task)
@@ -405,6 +527,9 @@ class VMHelper(HelperBase):

     @classmethod
     def lookup_image(cls, session, instance_id, vdi_ref):
+        """
+        Determine if VDI is using a PV kernel
+        """
         if FLAGS.xenapi_image_service == 'glance':
             return cls._lookup_image_glance(session, vdi_ref)
         else:
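
Aside: _fetch_image_objectstore's flag-twiddling can be read as a pure mapping from image type to plugin function and arguments; written out that way (constants mirror ImageType):

    KERNEL_RAMDISK, DISK, DISK_RAW = 0, 1, 2    # mirrors vm_utils.ImageType

    def objectstore_args(image_type):
        fn = 'get_kernel' if image_type == KERNEL_RAMDISK else 'get_vdi'
        return fn, {'add_partition': str(image_type != KERNEL_RAMDISK).lower(),
                    'raw': str(image_type == DISK_RAW).lower()}

    assert objectstore_args(DISK) == ('get_vdi', {'add_partition': 'true',
                                                  'raw': 'false'})
    assert objectstore_args(DISK_RAW)[1]['raw'] == 'true'
    assert objectstore_args(KERNEL_RAMDISK)[0] == 'get_kernel'
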
@@ -609,7 +734,33 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
     return parent_uuid


+def get_vdi_for_vm_safely(session, vm_ref):
+    vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+    if vdi_refs is None:
+        raise Exception(_("No VDIs found for VM %s") % vm_ref)
+    else:
+        num_vdis = len(vdi_refs)
+        if num_vdis != 1:
+            raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+                              " for VM %(vm_ref)s") % locals())
+
+    vdi_ref = vdi_refs[0]
+    vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+    return vdi_ref, vdi_rec
+
+
+def safe_find_sr(session):
+    """Same as find_sr except raises a NotFound exception if SR cannot be
+    determined
+    """
+    sr_ref = find_sr(session)
+    if sr_ref is None:
+        raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+    return sr_ref
+
+
 def find_sr(session):
+    """Return the storage repository to hold VM images"""
     host = session.get_xenapi_host()
     srs = session.get_xenapi().SR.get_all()
     for sr in srs:
@@ -624,6 +775,18 @@ def find_sr(session):
     return None


+def get_sr_path(session):
+    """Return the path to our storage repository
+
+    This is used when we're dealing with VHDs directly, either by taking
+    snapshots or by restoring an image in the DISK_VHD format.
+    """
+    sr_ref = safe_find_sr(session)
+    sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+    sr_uuid = sr_rec["uuid"]
+    return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+
 def remap_vbd_dev(dev):
     """Return the appropriate location for a plugged-in VBD device
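
Aside: get_sr_path reduces to joining the new xenapi_sr_base_path flag (default /var/run/sr-mount) with the SR's uuid; everything else is XenAPI record lookup. A minimal sketch (the uuid is illustrative):

    import os

    def get_sr_path(sr_uuid, base_path='/var/run/sr-mount'):
        """Where a file-based SR keeps its VHDs on dom0's filesystem."""
        return os.path.join(base_path, sr_uuid)

    # e.g. /var/run/sr-mount/3e1b2c94
    assert get_sr_path('3e1b2c94').endswith('3e1b2c94')
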
+ """ + sr_ref = safe_find_sr(session) + sr_rec = session.get_xenapi().SR.get_record(sr_ref) + sr_uuid = sr_rec["uuid"] + return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) + + def remap_vbd_dev(dev): """Return the appropriate location for a plugged-in VBD device @@ -737,9 +900,9 @@ def _is_vdi_pv(dev): return False -def _stream_disk(dev, type, virtual_size, image_file): +def _stream_disk(dev, image_type, virtual_size, image_file): offset = 0 - if type == ImageType.DISK: + if image_type == ImageType.DISK: offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) @@ -768,3 +931,8 @@ def _write_partition(virtual_size, dev): (dest, primary_first, primary_last)) LOG.debug(_('Writing partition table %s done.'), dest) + + +def get_name_label_for_image(image): + # TODO(sirp): This should eventually be the URI for the Glance image + return _('Glance image %s') % image diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b3e5627d8..5157f18f1 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -95,29 +95,32 @@ class VMOps(object): # Are we building from a pre-existing disk? if not disk: #if kernel is not present we must download a raw disk - if instance.kernel_id: - disk_image_type = ImageType.DISK - else: - disk_image_type = ImageType.DISK_RAW + + disk_image_type = VMHelper.determine_disk_image_type(instance) vdi_uuid = VMHelper.fetch_image(self._session, instance.id, - instance.image_id, user, project, disk_image_type) + instance.image_id, user, project, disk_image_type) vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) else: vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk) - #Have a look at the VDI and see if it has a PV kernel - if not instance.kernel_id: + if disk_image_type == ImageType.DISK_RAW: + #Have a look at the VDI and see if it has a PV kernel pv_kernel = VMHelper.lookup_image(self._session, instance.id, vdi_ref) + elif disk_image_type == ImageType.DISK_VHD: + # TODO(sirp): Assuming PV for now; this will need to be + # configurable as Windows will use HVM. + pv_kernel = True + + #Have a look at the VDI and see if it has a PV kernel if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, - ImageType.KERNEL_RAMDISK) + instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK) + if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, - ImageType.KERNEL_RAMDISK) + instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, pv_kernel) @@ -425,6 +428,9 @@ class VMOps(object): locals()) return + instance_id = instance.id + LOG.debug(_("Shutting down VM for Instance %(instance_id)s") + % locals()) try: task = None if hard: @@ -438,6 +444,9 @@ class VMOps(object): def _destroy_vdis(self, instance, vm): """Destroys all VDIs associated with a VM """ + instance_id = instance.id + LOG.debug(_("Destroying VDIs for Instance %(instance_id)s") + % locals()) vdis = VMHelper.lookup_vm_vdis(self._session, vm) if not vdis: @@ -450,31 +459,56 @@ class VMOps(object): except self.XenAPI.Failure, exc: LOG.exception(exc) - def _destroy_vm(self, instance, vm, destroy_kernel_ramdisk): + def _destroy_kernel_ramdisk(self, instance, vm): + """ + Three situations can occur: + + 1. We have neither a ramdisk nor a kernel, in which case we are a + RAW image and can omit this step + + 2. 
@@ -450,31 +459,56 @@ class VMOps(object):
         except self.XenAPI.Failure, exc:
             LOG.exception(exc)

-    def _destroy_vm(self, instance, vm, destroy_kernel_ramdisk):
+    def _destroy_kernel_ramdisk(self, instance, vm):
+        """
+        Three situations can occur:
+
+            1. We have neither a ramdisk nor a kernel, in which case we are
+               a RAW image and can omit this step
+
+            2. We have one or the other, in which case we should flag an
+               error
+
+            3. We have both, in which case we safely remove both the kernel
+               and the ramdisk.
+        """
+        instance_id = instance.id
+        if not instance.kernel_id and not instance.ramdisk_id:
+            # 1. No kernel or ramdisk
+            LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+                        "skipping kernel and ramdisk deletion") % locals())
+            return
+
+        if not (instance.kernel_id and instance.ramdisk_id):
+            # 2. We only have kernel xor ramdisk
+            raise exception.NotFound(
+                _("Instance %(instance_id)s has a kernel or ramdisk but not "
+                  "both") % locals())
+
+        # 3. We have both kernel and ramdisk
+        (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+            self._session, vm)
+
+        LOG.debug(_("Removing kernel/ramdisk files"))
+
+        args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+        task = self._session.async_call_plugin(
+            'glance', 'remove_kernel_ramdisk', args)
+        self._session.wait_for_task(instance.id, task)
+
+        LOG.debug(_("kernel/ramdisk files removed"))
+
+    def _destroy_vm(self, instance, vm):
         """Destroys a VM record """
+        instance_id = instance.id
         try:
-            kernel = None
-            ramdisk = None
-            if instance.kernel_id or instance.ramdisk_id:
-                (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
-                    self._session, vm)
-            task1 = self._session.call_xenapi('Async.VM.destroy', vm)
-            if destroy_kernel_ramdisk:
-                LOG.debug(_("Removing kernel/ramdisk files"))
-                fn = "remove_kernel_ramdisk"
-                args = {}
-                if kernel:
-                    args['kernel-file'] = kernel
-                if ramdisk:
-                    args['ramdisk-file'] = ramdisk
-                task2 = self._session.async_call_plugin('glance', fn, args)
-                LOG.debug(_("kernel/ramdisk files removed"))
-            self._session.wait_for_task(instance.id, task1)
-            if destroy_kernel_ramdisk:
-                self._session.wait_for_task(instance.id, task2)
+            task = self._session.call_xenapi('Async.VM.destroy', vm)
+            self._session.wait_for_task(instance_id, task)
         except self.XenAPI.Failure, exc:
             LOG.exception(exc)

+        LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
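
Aside: the three cases the new _destroy_kernel_ramdisk distinguishes, minus the plugin call; it returns what to remove and raises on a half-specified pair (a sketch with illustrative names):

    def kernel_ramdisk_cleanup(kernel_id, ramdisk_id):
        if not kernel_id and not ramdisk_id:
            return None                        # case 1: RAW/VHD, nothing to do
        if not (kernel_id and ramdisk_id):
            raise LookupError("kernel or ramdisk but not both")   # case 2
        return (kernel_id, ramdisk_id)         # case 3: remove both via plugin

    assert kernel_ramdisk_cleanup(None, None) is None
    assert kernel_ramdisk_cleanup('vmlinuz', 'initrd') == ('vmlinuz', 'initrd')
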
+ LOG.warning(_("VM is not present, skipping destroy...")) return if shutdown: self._shutdown(instance, vm) self._destroy_vdis(instance, vm) - self._destroy_vm(instance, vm, destroy_kernel_ramdisk) + if destroy_kernel_ramdisk: + self._destroy_kernel_ramdisk(instance, vm) + self._destroy_vm(instance, vm) def _wait_with_callback(self, instance_id, task, callback): ret = None diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 3df934af3..f4fb609d7 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -100,6 +100,8 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts', 5, 'Max number of times to poll for VHD to coalesce.' ' Used only if connection_type=xenapi.') +flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount', + 'Base path to the storage repository') flags.DEFINE_string('target_host', None, 'iSCSI Target Host') diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index e82614e70..d08754c19 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -21,17 +21,14 @@ # XenAPI plugin for managing glance images # -import base64 -import errno -import hmac import httplib import os import os.path import pickle -import sha +import shlex +import shutil import subprocess -import time -import urlparse +import tempfile import XenAPIPlugin @@ -41,30 +38,6 @@ configure_logging('glance') CHUNK_SIZE = 8192 KERNEL_DIR = '/boot/guest' -FILE_SR_PATH = '/var/run/sr-mount' - - -def remove_kernel_ramdisk(session, args): - """Removes kernel and/or ramdisk from dom0's file system""" - kernel_file = exists(args, 'kernel-file') - ramdisk_file = exists(args, 'ramdisk-file') - if kernel_file: - os.remove(kernel_file) - if ramdisk_file: - os.remove(ramdisk_file) - return "ok" - - -def copy_kernel_vdi(session, args): - vdi = exists(args, 'vdi-ref') - size = exists(args, 'image-size') - #Use the uuid as a filename - vdi_uuid = session.xenapi.VDI.get_uuid(vdi) - copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)} - filename = with_vdi_in_dom0(session, vdi, False, - lambda dev: - _copy_kernel_vdi('/dev/%s' % dev, copy_args)) - return filename def _copy_kernel_vdi(dest, copy_args): @@ -89,93 +62,309 @@ def _copy_kernel_vdi(dest, copy_args): return filename -def put_vdis(session, args): +def _download_tarball(sr_path, staging_path, image_id, glance_host, + glance_port): + """Download the tarball image from Glance and extract it into the staging + area. + """ + conn = httplib.HTTPConnection(glance_host, glance_port) + conn.request('GET', '/images/%s' % image_id) + resp = conn.getresponse() + if resp.status == httplib.NOT_FOUND: + raise Exception("Image '%s' not found in Glance" % image_id) + elif resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % res.status) + + tar_cmd = "tar -zx --directory=%(staging_path)s" % locals() + tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True) + + chunk = resp.read(CHUNK_SIZE) + while chunk: + tar_proc.stdin.write(chunk) + chunk = resp.read(CHUNK_SIZE) + + _finish_subprocess(tar_proc, tar_cmd) + conn.close() + + +def _fixup_vhds(sr_path, staging_path, uuid_stack): + """Fixup the downloaded VHDs before we move them into the SR. + + We cannot extract VHDs directly into the SR since they don't yet have + UUIDs, aren't properly associated with each other, and would be subject to + a race-condition of one-file being present and the other not being + downloaded yet. 
+def _fixup_vhds(sr_path, staging_path, uuid_stack):
+    """Fix up the downloaded VHDs before we move them into the SR.
+
+    We cannot extract VHDs directly into the SR since they don't yet have
+    UUIDs, aren't properly associated with each other, and would be subject
+    to a race condition of one file being present and the other not being
+    downloaded yet.
+
+    To avoid these problems, we use a staging area to fix up the VHDs
+    before moving them into the SR. The steps involved are:
+
+        1. Extracting tarball into staging area
+
+        2. Renaming VHDs to use UUIDs ('snap.vhd' -> 'ffff-aaaa-...vhd')
+
+        3. Linking the two VHDs together
+
+        4. Pseudo-atomically moving the images into the SR. (It's not
+           really atomic because it takes place as two os.rename
+           operations; however, the chances of an SR.scan occurring
+           between the two rename() invocations are so small that we can
+           safely ignore it)
+    """
+    def rename_with_uuid(orig_path):
+        """Rename VHD using a UUID so that it will be recognized by the SR
+        on a subsequent scan.
+
+        Since Python 2.4 doesn't have the `uuid` module, we pass a stack of
+        pre-computed UUIDs from the compute worker.
+        """
+        orig_dirname = os.path.dirname(orig_path)
+        uuid = uuid_stack.pop()
+        new_path = os.path.join(orig_dirname, "%s.vhd" % uuid)
+        os.rename(orig_path, new_path)
+        return new_path, uuid
+
+    def link_vhds(child_path, parent_path):
+        """Use vhd-util to associate the snapshot VHD with its base_copy.
+
+        This needs to be done before we move both VHDs into the SR to
+        prevent the base_copy from being DOA (deleted-on-arrival).
+        """
+        modify_cmd = ("vhd-util modify -n %(child_path)s -p %(parent_path)s"
+                      % locals())
+        modify_proc = _make_subprocess(modify_cmd, stderr=True)
+        _finish_subprocess(modify_proc, modify_cmd)
+
+    def move_into_sr(orig_path):
+        """Move a file into the SR"""
+        filename = os.path.basename(orig_path)
+        new_path = os.path.join(sr_path, filename)
+        os.rename(orig_path, new_path)
+        return new_path
+
+    def assert_vhd_not_hidden(path):
+        """Sanity check on the image: if a snap.vhd isn't present, then the
+        image.vhd better not be marked 'hidden' or it will be deleted when
+        moved into the SR.
+        """
+        query_cmd = "vhd-util query -n %(path)s -f" % locals()
+        query_proc = _make_subprocess(query_cmd, stdout=True, stderr=True)
+        out, err = _finish_subprocess(query_proc, query_cmd)
+
+        for line in out.splitlines():
+            if line.startswith('hidden'):
+                value = line.split(':')[1].strip()
+                if value == "1":
+                    raise Exception(
+                        "VHD %(path)s is marked as hidden without child" %
+                        locals())
+
+    orig_base_copy_path = os.path.join(staging_path, 'image.vhd')
+    if not os.path.exists(orig_base_copy_path):
+        raise Exception("Invalid image: image.vhd not present")
+
+    base_copy_path, base_copy_uuid = rename_with_uuid(orig_base_copy_path)
+
+    vdi_uuid = base_copy_uuid
+    orig_snap_path = os.path.join(staging_path, 'snap.vhd')
+    if os.path.exists(orig_snap_path):
+        snap_path, snap_uuid = rename_with_uuid(orig_snap_path)
+        vdi_uuid = snap_uuid
+        # NOTE(sirp): this step is necessary so that an SR scan won't
+        # delete the base_copy out from under us (since it would be
+        # orphaned)
+        link_vhds(snap_path, base_copy_path)
+        move_into_sr(snap_path)
+    else:
+        assert_vhd_not_hidden(base_copy_path)
+
+    move_into_sr(base_copy_path)
+    return vdi_uuid
+
+
+def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
+    """Hard-link VHDs into the staging area with the appropriate filename
+    ('image.vhd' or 'snap.vhd')
+    """
+    for name, uuid in vdi_uuids.items():
+        source = os.path.join(sr_path, "%s.vhd" % uuid)
+        link_name = os.path.join(staging_path, "%s.vhd" % name)
+        os.link(source, link_name)
+
+
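
Aside: _prepare_staging_area_for_upload in miniature: hard links give the tarball its canonical member names (image.vhd, snap.vhd) without copying or renaming anything inside the SR. A self-contained demo in a temp dir (the uuids are illustrative):

    import os
    import shutil
    import tempfile

    sr_path = tempfile.mkdtemp()              # stands in for the mounted SR
    staging = tempfile.mkdtemp(dir=sr_path)   # staging area inside the SR

    vdi_uuids = {'image': 'aaaa-1111', 'snap': 'bbbb-2222'}
    for name, uuid in vdi_uuids.items():
        source = os.path.join(sr_path, '%s.vhd' % uuid)
        open(source, 'w').close()             # fake VHD sitting in the SR
        os.link(source, os.path.join(staging, '%s.vhd' % name))

    assert sorted(os.listdir(staging)) == ['image.vhd', 'snap.vhd']
    shutil.rmtree(sr_path)
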
+ """ + conn = httplib.HTTPConnection(glance_host, glance_port) + # NOTE(sirp): httplib under python2.4 won't accept a file-like object + # to request + conn.putrequest('PUT', '/images/%s' % image_id) + + # TODO(sirp): make `store` configurable + headers = { + 'content-type': 'application/octet-stream', + 'transfer-encoding': 'chunked', + 'x-image-meta-is_public': 'True', + 'x-image-meta-status': 'queued', + 'x-image-meta-type': 'vhd', } + for header, value in headers.iteritems(): + conn.putheader(header, value) + conn.endheaders() + + tar_cmd = "tar -zc --directory=%(staging_path)s ." % locals() + tar_proc = _make_subprocess(tar_cmd, stdout=True, stderr=True) + + chunk = tar_proc.stdout.read(CHUNK_SIZE) + while chunk: + conn.send("%x\r\n%s\r\n" % (len(chunk), chunk)) + chunk = tar_proc.stdout.read(CHUNK_SIZE) + conn.send("0\r\n\r\n") + + _finish_subprocess(tar_proc, tar_cmd) + + resp = conn.getresponse() + if resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % resp.status) + conn.close() + + +def _make_staging_area(sr_path): + """ + The staging area is a place where we can temporarily store and + manipulate VHDs. The use of the staging area is different for upload and + download: + + Download + ======== + + When we download the tarball, the VHDs contained within will have names + like "snap.vhd" and "image.vhd". We need to assign UUIDs to them before + moving them into the SR. However, since 'image.vhd' may be a base_copy, we + need to link it to 'snap.vhd' (using vhd-util modify) before moving both + into the SR (otherwise the SR.scan will cause 'image.vhd' to be deleted). + The staging area gives us a place to perform these operations before they + are moved to the SR, scanned, and then registered with XenServer. + + Upload + ====== + + On upload, we want to rename the VHDs to reflect what they are, 'snap.vhd' + in the case of the snapshot VHD, and 'image.vhd' in the case of the + base_copy. The staging area provides a directory in which we can create + hard-links to rename the VHDs without affecting what's in the SR. + + + NOTE + ==== + + The staging area is created as a subdirectory within the SR in order to + guarantee that it resides within the same filesystem and therefore permit + hard-linking and cheap file moves. + """ + staging_path = tempfile.mkdtemp(dir=sr_path) + return staging_path + + +def _cleanup_staging_area(staging_path): + """Remove staging area directory + + On upload, the staging area contains hard-links to the VHDs in the SR; + it's safe to remove the staging-area because the SR will keep the link + count > 0 (so the VHDs in the SR will not be deleted). 
+ """ + shutil.rmtree(staging_path) + + +def _make_subprocess(cmdline, stdout=False, stderr=False, stdin=False): + """Make a subprocess according to the given command-line string + """ + kwargs = {} + kwargs['stdout'] = stdout and subprocess.PIPE or None + kwargs['stderr'] = stderr and subprocess.PIPE or None + kwargs['stdin'] = stdin and subprocess.PIPE or None + args = shlex.split(cmdline) + proc = subprocess.Popen(args, **kwargs) + return proc + + +def _finish_subprocess(proc, cmdline): + """Ensure that the process returned a zero exit code indicating success + """ + out, err = proc.communicate() + ret = proc.returncode + if ret != 0: + raise Exception("'%(cmdline)s' returned non-zero exit code: " + "retcode=%(ret)i, stderr='%(err)s'" % locals()) + return out, err + + +def download_vhd(session, args): + """Download an image from Glance, unbundle it, and then deposit the VHDs + into the storage repository + """ params = pickle.loads(exists(args, 'params')) - vdi_uuids = params["vdi_uuids"] image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] + uuid_stack = params["uuid_stack"] + sr_path = params["sr_path"] - sr_path = get_sr_path(session) - #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs - tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id)) - tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path] - paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids] - tar_cmd.extend(paths) - logging.debug("Bundling image with cmd: %s", tar_cmd) - subprocess.call(tar_cmd) - logging.debug("Writing to test file %s", tmp_file) - put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port) - # FIXME(sirp): return anything useful here? - return "" - - -def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): - size = os.path.getsize(tmp_file) - basename = os.path.basename(tmp_file) - - bundle = open(tmp_file, 'r') + staging_path = _make_staging_area(sr_path) try: - headers = { - 'x-image-meta-store': 'file', - 'x-image-meta-is_public': 'True', - 'x-image-meta-type': 'raw', - 'x-image-meta-size': size, - 'content-length': size, - 'content-type': 'application/octet-stream', - } - conn = httplib.HTTPConnection(glance_host, glance_port) - #NOTE(sirp): httplib under python2.4 won't accept a file-like object - # to request - conn.putrequest('PUT', '/images/%s' % image_id) - - for header, value in headers.iteritems(): - conn.putheader(header, value) - conn.endheaders() - - chunk = bundle.read(CHUNK_SIZE) - while chunk: - conn.send(chunk) - chunk = bundle.read(CHUNK_SIZE) - - res = conn.getresponse() - #FIXME(sirp): should this be 201 Created? - if res.status != httplib.OK: - raise Exception("Unexpected response from Glance %i" % res.status) + _download_tarball(sr_path, staging_path, image_id, glance_host, + glance_port) + vdi_uuid = _fixup_vhds(sr_path, staging_path, uuid_stack) + return vdi_uuid finally: - bundle.close() + _cleanup_staging_area(staging_path) + + +def upload_vhd(session, args): + """Bundle the VHDs comprising an image and then stream them into Glance. 
+ """ + params = pickle.loads(exists(args, 'params')) + vdi_uuids = params["vdi_uuids"] + image_id = params["image_id"] + glance_host = params["glance_host"] + glance_port = params["glance_port"] + sr_path = params["sr_path"] + staging_path = _make_staging_area(sr_path) + try: + _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) + _upload_tarball(staging_path, image_id, glance_host, glance_port) + finally: + _cleanup_staging_area(staging_path) -def get_sr_path(session): - sr_ref = find_sr(session) + return "" # Nothing useful to return on an upload - if sr_ref is None: - raise Exception('Cannot find SR to read VDI from') - sr_rec = session.xenapi.SR.get_record(sr_ref) - sr_uuid = sr_rec["uuid"] - sr_path = os.path.join(FILE_SR_PATH, sr_uuid) - return sr_path +def copy_kernel_vdi(session, args): + vdi = exists(args, 'vdi-ref') + size = exists(args, 'image-size') + #Use the uuid as a filename + vdi_uuid = session.xenapi.VDI.get_uuid(vdi) + copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)} + filename = with_vdi_in_dom0(session, vdi, False, + lambda dev: + _copy_kernel_vdi('/dev/%s' % dev, copy_args)) + return filename -#TODO(sirp): objectstore, migration and glance need this, should this be -# refactored into common lib -def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() - for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) - if not ('i18n-key' in sr_rec['other_config'] and - sr_rec['other_config']['i18n-key'] == 'local-storage'): - continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) - if pbd_rec['host'] == host: - return sr - return None +def remove_kernel_ramdisk(session, args): + """Removes kernel and/or ramdisk from dom0's file system""" + kernel_file = exists(args, 'kernel-file') + ramdisk_file = exists(args, 'ramdisk-file') + if kernel_file: + os.remove(kernel_file) + if ramdisk_file: + os.remove(ramdisk_file) + return "ok" if __name__ == '__main__': - XenAPIPlugin.dispatch({'put_vdis': put_vdis, + XenAPIPlugin.dispatch({'upload_vhd': upload_vhd, + 'download_vhd': download_vhd, 'copy_kernel_vdi': copy_kernel_vdi, 'remove_kernel_ramdisk': remove_kernel_ramdisk}) diff --git a/run_tests.sh b/run_tests.sh index 7ac3ff33f..8f4d37cd4 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -84,7 +84,7 @@ fi if [ -z "$noseargs" ]; then srcfiles=`find bin -type f ! -name "nova.conf*"` - srcfiles+=" nova setup.py" + srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1 else run_tests |
