| field | value | date |
|---|---|---|
| author | Cerberus <matt.dietz@rackspace.com> | 2011-02-28 15:42:54 -0600 |
| committer | Cerberus <matt.dietz@rackspace.com> | 2011-02-28 15:42:54 -0600 |
| commit | 8a9130eae5a421c52573a0fb34fb0125e594ef83 | |
| tree | c2612ae7cba187abc1714f74a5d1408accadd010 /nova/virt | |
| parent | 8da6796789767b1341cb5a650066b67ad3191c74 | |
| parent | 78bd53a44a529f0c6641357fa001d4d037fbc375 | |
Merge from trunk and mega conflict resolution
Diffstat (limited to 'nova/virt')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 216 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 114 |
| -rw-r--r-- | nova/virt/xenapi_conn.py | 2 |
3 files changed, 270 insertions, 62 deletions
```diff
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 870660dea..eea9bb0b9 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -24,6 +24,7 @@
 import pickle
 import re
 import time
 import urllib
+import uuid
 from xml.dom import minidom
 
 from eventlet import event
@@ -63,11 +64,14 @@ class ImageType:
         0 - kernel/ramdisk image (goes on dom0's filesystem)
         1 - disk image (local SR, partitioned by objectstore plugin)
         2 - raw disk image (local SR, NOT partitioned by plugin)
+        3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+            linux, HVM assumed for Windows)
     """
 
     KERNEL_RAMDISK = 0
     DISK = 1
     DISK_RAW = 2
+    DISK_VHD = 3
 
 
 class VMHelper(HelperBase):
@@ -292,7 +296,9 @@ class VMHelper(HelperBase):
             session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
 
         #TODO(sirp): we need to assert only one parent, not parents two deep
-        return template_vm_ref, [template_vdi_uuid, parent_uuid]
+        template_vdi_uuids = {'image': parent_uuid,
+                              'snap': template_vdi_uuid}
+        return template_vm_ref, template_vdi_uuids
 
     @classmethod
     def get_sr(cls, session, sr_label='slices'):
@@ -312,22 +318,26 @@ class VMHelper(HelperBase):
         """ Requests that the Glance plugin bundle the specified VDIs and
         push them into Glance using the specified human-friendly name.
         """
+        # NOTE(sirp): Currently we only support uploading images as VHD, there
+        # is no RAW equivalent (yet)
         logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
                 " ID %(image_id)s") % locals())
 
         params = {'vdi_uuids': vdi_uuids,
                   'image_id': image_id,
                   'glance_host': FLAGS.glance_host,
-                  'glance_port': FLAGS.glance_port}
+                  'glance_port': FLAGS.glance_port,
+                  'sr_path': get_sr_path(session)}
 
         kwargs = {'params': pickle.dumps(params)}
-        task = session.async_call_plugin('glance', 'put_vdis', kwargs)
+        task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
         session.wait_for_task(instance_id, task)
 
     @classmethod
-    def fetch_image(cls, session, instance_id, image, user, project, type):
+    def fetch_image(cls, session, instance_id, image, user, project,
+                    image_type):
         """
-        type is interpreted as an ImageType instance
+        image_type is interpreted as an ImageType instance
         Related flags:
             xenapi_image_service = ['glance', 'objectstore']
             glance_address = 'address for glance services'
```
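The snapshot helper in the hunk above now returns a dict rather than a positional list. A minimal sketch of the new shape, with made-up placeholder UUIDs (`describe_snapshot` is illustrative, not part of the change):

```python
# Sketch only: placeholder UUIDs, illustrative helper.
template_vdi_uuids = {'image': 'uuid-of-base-copy',     # parent VDI
                      'snap': 'uuid-of-leaf-snapshot'}  # leaf snapshot VDI

def describe_snapshot(vdi_uuids):
    # Callers name the VDI they want instead of indexing a list.
    return "image=%(image)s snap=%(snap)s" % vdi_uuids

print(describe_snapshot(template_vdi_uuids))
```

`nova/virt/xenapi/vm_utils.py` (continued):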
```diff
@@ -337,35 +347,80 @@ class VMHelper(HelperBase):
 
         if FLAGS.xenapi_image_service == 'glance':
             return cls._fetch_image_glance(session, instance_id, image,
-                                           access, type)
+                                           access, image_type)
         else:
             return cls._fetch_image_objectstore(session, instance_id, image,
-                                                access, user.secret, type)
+                                                access, user.secret,
+                                                image_type)
 
     @classmethod
-    def _fetch_image_glance(cls, session, instance_id, image, access, type):
-        sr = find_sr(session)
-        if sr is None:
-            raise exception.NotFound('Cannot find SR to write VDI to')
+    def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+                                image_type):
+        LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
+                    % locals())
+
+        sr_ref = safe_find_sr(session)
+
+        # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
+        # have the `uuid` module. To work around this, we generate the uuids
+        # here (under Python 2.6+) and pass them as arguments
+        uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
+
+        params = {'image_id': image,
+                  'glance_host': FLAGS.glance_host,
+                  'glance_port': FLAGS.glance_port,
+                  'uuid_stack': uuid_stack,
+                  'sr_path': get_sr_path(session)}
+
+        kwargs = {'params': pickle.dumps(params)}
+        task = session.async_call_plugin('glance', 'download_vhd', kwargs)
+        vdi_uuid = session.wait_for_task(instance_id, task)
+
+        scan_sr(session, instance_id, sr_ref)
+
+        # Set the name-label to ease debugging
+        vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+        name_label = get_name_label_for_image(image)
+        session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
+
+        LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
+                    % locals())
+        return vdi_uuid
+
+    @classmethod
+    def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+                                 image_type):
+        """Fetch the image from Glance
 
-        c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+        NOTE:
+        Unlike _fetch_image_glance_vhd, this method does not use the Glance
+        plugin; instead, it streams the disks through domU to the VDI
+        directly.
 
-        meta, image_file = c.get_image(image)
+        """
+        # FIXME(sirp): Since the Glance plugin seems to be required for the
+        # VHD disk, it may be worth using the plugin for both VHD and RAW and
+        # DISK restores
+        sr_ref = safe_find_sr(session)
+
+        client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+        meta, image_file = client.get_image(image)
         virtual_size = int(meta['size'])
         vdi_size = virtual_size
         LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
-        if type == ImageType.DISK:
+
+        if image_type == ImageType.DISK:
             # Make room for MBR.
             vdi_size += MBR_SIZE_BYTES
-        vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
-                             vdi_size, False)
+
+        name_label = get_name_label_for_image(image)
+        vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
+
         with_vdi_attached_here(session, vdi, False,
                                lambda dev:
-                               _stream_disk(dev, type,
+                               _stream_disk(dev, image_type,
                                             virtual_size, image_file))
-        if (type == ImageType.KERNEL_RAMDISK):
+        if image_type == ImageType.KERNEL_RAMDISK:
            #we need to invoke a plugin for copying VDI's
            #content into proper path
            LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
```
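The `uuid_stack` in `_fetch_image_glance_vhd` above exists because the dom0 Glance plugin runs under Python 2.4, which lacks the `uuid` module; the UUIDs are minted domU-side and shipped across in the pickled params. A standalone sketch of assembling that payload (image id, host, port, and path are placeholder values):

```python
import pickle
import uuid

# Pre-mint UUIDs for the Python 2.4 plugin running in dom0.
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]

params = {'image_id': 'placeholder-image-id',
          'glance_host': '127.0.0.1',            # placeholder
          'glance_port': 9292,                   # placeholder
          'uuid_stack': uuid_stack,
          'sr_path': '/var/run/sr-mount/placeholder-sr-uuid'}

kwargs = {'params': pickle.dumps(params)}
# This is the payload handed to:
#   session.async_call_plugin('glance', 'download_vhd', kwargs)
```

`nova/virt/xenapi/vm_utils.py` (continued):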
```diff
@@ -384,20 +439,87 @@ class VMHelper(HelperBase):
             return session.get_xenapi().VDI.get_uuid(vdi)
 
     @classmethod
+    def determine_disk_image_type(cls, instance):
+        """Disk Image Types are used to determine where the kernel will
+        reside within an image. To figure out which type we're dealing with,
+        we use the following rules:
+
+        1. If we're using Glance, we can use the image_type field to
+           determine the image_type
+
+        2. If we're not using Glance, then we need to deduce this based on
+           whether a kernel_id is specified.
+        """
+        def log_disk_format(image_type):
+            pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+                             ImageType.DISK: 'DISK',
+                             ImageType.DISK_RAW: 'DISK_RAW',
+                             ImageType.DISK_VHD: 'DISK_VHD'}
+            disk_format = pretty_format[image_type]
+            image_id = instance.image_id
+            instance_id = instance.id
+            LOG.debug(_("Detected %(disk_format)s format for image "
+                        "%(image_id)s, instance %(instance_id)s") % locals())
+
+        def determine_from_glance():
+            glance_type2nova_type = {'machine': ImageType.DISK,
+                                     'raw': ImageType.DISK_RAW,
+                                     'vhd': ImageType.DISK_VHD,
+                                     'kernel': ImageType.KERNEL_RAMDISK,
+                                     'ramdisk': ImageType.KERNEL_RAMDISK}
+            client = glance.client.Client(FLAGS.glance_host,
+                                          FLAGS.glance_port)
+            meta = client.get_image_meta(instance.image_id)
+            type_ = meta['type']
+            try:
+                return glance_type2nova_type[type_]
+            except KeyError:
+                raise exception.NotFound(
+                    _("Unrecognized image type '%(type_)s'") % locals())
+
+        def determine_from_instance():
+            if instance.kernel_id:
+                return ImageType.DISK
+            else:
+                return ImageType.DISK_RAW
+
+        # FIXME(sirp): can we unify the ImageService and xenapi_image_service
+        # abstractions?
+        if FLAGS.xenapi_image_service == 'glance':
+            image_type = determine_from_glance()
+        else:
+            image_type = determine_from_instance()
+
+        log_disk_format(image_type)
+        return image_type
+
+    @classmethod
+    def _fetch_image_glance(cls, session, instance_id, image, access,
+                            image_type):
+        if image_type == ImageType.DISK_VHD:
+            return cls._fetch_image_glance_vhd(
+                session, instance_id, image, access, image_type)
+        else:
+            return cls._fetch_image_glance_disk(
+                session, instance_id, image, access, image_type)
+
+    @classmethod
     def _fetch_image_objectstore(cls, session, instance_id, image, access,
-                                 secret, type):
+                                 secret, image_type):
         url = images.image_url(image)
         LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
-        fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+        if image_type == ImageType.KERNEL_RAMDISK:
+            fn = 'get_kernel'
+        else:
+            fn = 'get_vdi'
         args = {}
         args['src_url'] = url
         args['username'] = access
         args['password'] = secret
         args['add_partition'] = 'false'
         args['raw'] = 'false'
-        if type != ImageType.KERNEL_RAMDISK:
+        if image_type != ImageType.KERNEL_RAMDISK:
             args['add_partition'] = 'true'
-            if type == ImageType.DISK_RAW:
+            if image_type == ImageType.DISK_RAW:
                 args['raw'] = 'true'
         task = session.async_call_plugin('objectstore', fn, args)
         uuid = session.wait_for_task(instance_id, task)
@@ -405,6 +527,9 @@ class VMHelper(HelperBase):
 
     @classmethod
     def lookup_image(cls, session, instance_id, vdi_ref):
+        """
+        Determine if VDI is using a PV kernel
+        """
         if FLAGS.xenapi_image_service == 'glance':
             return cls._lookup_image_glance(session, vdi_ref)
         else:
@@ -609,7 +734,33 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
     return parent_uuid
 
 
+def get_vdi_for_vm_safely(session, vm_ref):
+    vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+    if vdi_refs is None:
+        raise Exception(_("No VDIs found for VM %s") % vm_ref)
+    else:
+        num_vdis = len(vdi_refs)
+        if num_vdis != 1:
+            raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+                              " for VM %(vm_ref)s") % locals())
+
+    vdi_ref = vdi_refs[0]
+    vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+    return vdi_ref, vdi_rec
+
+
+def safe_find_sr(session):
+    """Same as find_sr except raises a NotFound exception if SR cannot be
+    determined
+    """
+    sr_ref = find_sr(session)
+    if sr_ref is None:
+        raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+    return sr_ref
+
+
 def find_sr(session):
+    """Return the storage repository to hold VM images"""
     host = session.get_xenapi_host()
     srs = session.get_xenapi().SR.get_all()
     for sr in srs:
```
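The rules in `determine_disk_image_type` boil down to a lookup table plus a kernel_id fallback. A self-contained sketch of just the decision (string labels stand in for the ImageType constants; the real method reads Glance metadata and the instance record):

```python
GLANCE_TYPE_TO_NOVA = {'machine': 'DISK',
                       'raw': 'DISK_RAW',
                       'vhd': 'DISK_VHD',
                       'kernel': 'KERNEL_RAMDISK',
                       'ramdisk': 'KERNEL_RAMDISK'}

def disk_image_type(glance_type, kernel_id, using_glance):
    if using_glance:
        try:
            return GLANCE_TYPE_TO_NOVA[glance_type]
        except KeyError:
            raise LookupError("Unrecognized image type '%s'" % glance_type)
    # objectstore has no type metadata: deduce from presence of a kernel
    return 'DISK' if kernel_id else 'DISK_RAW'

assert disk_image_type('vhd', None, using_glance=True) == 'DISK_VHD'
assert disk_image_type(None, 'aki-123', using_glance=False) == 'DISK'
```

`nova/virt/xenapi/vm_utils.py` (continued):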
```diff
@@ -624,6 +775,18 @@ def find_sr(session):
     return None
 
 
+def get_sr_path(session):
+    """Return the path to our storage repository
+
+    This is used when we're dealing with VHDs directly, either by taking
+    snapshots or by restoring an image in the DISK_VHD format.
+    """
+    sr_ref = safe_find_sr(session)
+    sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+    sr_uuid = sr_rec["uuid"]
+    return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+
 def remap_vbd_dev(dev):
     """Return the appropriate location for a plugged-in VBD device
 
@@ -737,9 +900,9 @@ def _is_vdi_pv(dev):
     return False
 
 
-def _stream_disk(dev, type, virtual_size, image_file):
+def _stream_disk(dev, image_type, virtual_size, image_file):
     offset = 0
-    if type == ImageType.DISK:
+    if image_type == ImageType.DISK:
         offset = MBR_SIZE_BYTES
         _write_partition(virtual_size, dev)
 
@@ -768,3 +931,8 @@ def _write_partition(virtual_size, dev):
               (dest, primary_first, primary_last))
 
     LOG.debug(_('Writing partition table %s done.'), dest)
+
+
+def get_name_label_for_image(image):
+    # TODO(sirp): This should eventually be the URI for the Glance image
+    return _('Glance image %s') % image
```
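`get_sr_path` above is just a join of the new `xenapi_sr_base_path` flag (defined in the xenapi_conn.py hunk further down) with the SR's uuid; a sketch with placeholder values:

```python
import os

xenapi_sr_base_path = '/var/run/sr-mount'  # the flag's default
sr_uuid = 'placeholder-sr-uuid'            # SR.get_record(sr_ref)['uuid']
print(os.path.join(xenapi_sr_base_path, sr_uuid))
# -> /var/run/sr-mount/placeholder-sr-uuid
```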
```diff
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b3e5627d8..5157f18f1 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -95,29 +95,32 @@ class VMOps(object):
         # Are we building from a pre-existing disk?
         if not disk:
             #if kernel is not present we must download a raw disk
-            if instance.kernel_id:
-                disk_image_type = ImageType.DISK
-            else:
-                disk_image_type = ImageType.DISK_RAW
+
+            disk_image_type = VMHelper.determine_disk_image_type(instance)
             vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
-                    instance.image_id, user, project, disk_image_type)
+                instance.image_id, user, project, disk_image_type)
             vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
         else:
             vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', disk)
 
-        #Have a look at the VDI and see if it has a PV kernel
-        if not instance.kernel_id:
+        if disk_image_type == ImageType.DISK_RAW:
+            #Have a look at the VDI and see if it has a PV kernel
             pv_kernel = VMHelper.lookup_image(self._session, instance.id,
                                               vdi_ref)
+        elif disk_image_type == ImageType.DISK_VHD:
+            # TODO(sirp): Assuming PV for now; this will need to be
+            # configurable as Windows will use HVM.
+            pv_kernel = True
+
         #Have a look at the VDI and see if it has a PV kernel
         if instance.kernel_id:
             kernel = VMHelper.fetch_image(self._session, instance.id,
-                instance.kernel_id, user, project,
-                ImageType.KERNEL_RAMDISK)
+                instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+
         if instance.ramdisk_id:
             ramdisk = VMHelper.fetch_image(self._session, instance.id,
-                instance.ramdisk_id, user, project,
-                ImageType.KERNEL_RAMDISK)
+                instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
 
         vm_ref = VMHelper.create_vm(self._session,
                                     instance, kernel, ramdisk, pv_kernel)
@@ -425,6 +428,9 @@ class VMOps(object):
                       locals())
             return
 
+        instance_id = instance.id
+        LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
+                    % locals())
         try:
             task = None
             if hard:
@@ -438,6 +444,9 @@ class VMOps(object):
 
     def _destroy_vdis(self, instance, vm):
         """Destroys all VDIs associated with a VM """
+        instance_id = instance.id
+        LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
+                    % locals())
         vdis = VMHelper.lookup_vm_vdis(self._session, vm)
 
         if not vdis:
@@ -450,31 +459,56 @@ class VMOps(object):
             except self.XenAPI.Failure, exc:
                 LOG.exception(exc)
 
-    def _destroy_vm(self, instance, vm, destroy_kernel_ramdisk):
+    def _destroy_kernel_ramdisk(self, instance, vm):
+        """
+        Three situations can occur:
+
+            1. We have neither a ramdisk nor a kernel, in which case we are a
+               RAW image and can omit this step
+
+            2. We have one or the other, in which case, we should flag as an
+               error
+
+            3. We have both, in which case we safely remove both the kernel
+               and the ramdisk.
+        """
+        instance_id = instance.id
+        if not instance.kernel_id and not instance.ramdisk_id:
+            # 1. No kernel or ramdisk
+            LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+                        "skipping kernel and ramdisk deletion") % locals())
+            return
+
+        if not (instance.kernel_id and instance.ramdisk_id):
+            # 2. We only have kernel xor ramdisk
+            raise exception.NotFound(
+                _("Instance %(instance_id)s has a kernel or ramdisk but not "
+                  "both" % locals()))
+
+        # 3. We have both kernel and ramdisk
+        (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+            self._session, vm)
+
+        LOG.debug(_("Removing kernel/ramdisk files"))
+
+        args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+        task = self._session.async_call_plugin(
+            'glance', 'remove_kernel_ramdisk', args)
+        self._session.wait_for_task(instance.id, task)
+
+        LOG.debug(_("kernel/ramdisk files removed"))
+
+    def _destroy_vm(self, instance, vm):
         """Destroys a VM record """
+        instance_id = instance.id
         try:
-            kernel = None
-            ramdisk = None
-            if instance.kernel_id or instance.ramdisk_id:
-                (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
-                    self._session, vm)
-            task1 = self._session.call_xenapi('Async.VM.destroy', vm)
-            if destroy_kernel_ramdisk:
-                LOG.debug(_("Removing kernel/ramdisk files"))
-                fn = "remove_kernel_ramdisk"
-                args = {}
-                if kernel:
-                    args['kernel-file'] = kernel
-                if ramdisk:
-                    args['ramdisk-file'] = ramdisk
-                task2 = self._session.async_call_plugin('glance', fn, args)
-                LOG.debug(_("kernel/ramdisk files removed"))
-            self._session.wait_for_task(instance.id, task1)
-            if destroy_kernel_ramdisk:
-                self._session.wait_for_task(instance.id, task2)
+            task = self._session.call_xenapi('Async.VM.destroy', vm)
+            self._session.wait_for_task(instance_id, task)
         except self.XenAPI.Failure, exc:
             LOG.exception(exc)
 
+        LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
     def destroy(self, instance):
         """
         Destroy VM instance
@@ -482,28 +516,32 @@ class VMOps(object):
         This is the method exposed by xenapi_conn.destroy(). The rest of the
         destroy_* methods are internal.
         """
+        instance_id = instance.id
+        LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
         vm = VMHelper.lookup(self._session, instance.name)
         return self._destroy(instance, vm, shutdown=True)
 
     def _destroy(self, instance, vm, shutdown=True,
-                 destroy_kernel_ramdisk=True):
+            destroy_kernel_ramdisk=True):
         """
         Destroys VM instance by performing:
-            1. A shutdown if requested
-            2. Destroying associated VDIs
-            3. Destroying that actual VM record
+            1. A shutdown if requested
+            2. Destroying associated VDIs
+            3. Destroying kernel and ramdisk files (if necessary)
+            4. Destroying that actual VM record
         """
         if vm is None:
-            # Don't complain, just return.  This lets us clean up instances
-            # that have already disappeared from the underlying platform.
+            LOG.warning(_("VM is not present, skipping destroy..."))
            return
 
         if shutdown:
             self._shutdown(instance, vm)
 
         self._destroy_vdis(instance, vm)
-        self._destroy_vm(instance, vm, destroy_kernel_ramdisk)
+        if destroy_kernel_ramdisk:
+            self._destroy_kernel_ramdisk(instance, vm)
+        self._destroy_vm(instance, vm)
 
     def _wait_with_callback(self, instance_id, task, callback):
         ret = None
```
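The three cases spelled out in `_destroy_kernel_ramdisk` are pure branching and easy to check in isolation; a minimal sketch with no XenAPI calls (`classify_kernel_ramdisk` is illustrative, not part of the change):

```python
def classify_kernel_ramdisk(kernel_id, ramdisk_id):
    """Mirror the three cases documented in _destroy_kernel_ramdisk."""
    if not kernel_id and not ramdisk_id:
        return 'skip'        # 1. RAW/VHD image: nothing to delete
    if not (kernel_id and ramdisk_id):
        # 2. kernel xor ramdisk is an inconsistent record
        raise ValueError('instance has a kernel or ramdisk but not both')
    return 'remove_both'     # 3. hand both files to the glance plugin

assert classify_kernel_ramdisk(None, None) == 'skip'
assert classify_kernel_ramdisk('aki-1', 'ari-1') == 'remove_both'
```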
```diff
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 3df934af3..f4fb609d7 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -100,6 +100,8 @@
 flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts', 5,
                      'Max number of times to poll for VHD to coalesce.'
                      ' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
+                    'Base path to the storage repository')
 flags.DEFINE_string('target_host',
                     None,
                     'iSCSI Target Host')
```
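The new flag follows the usual nova gflags pattern; a sketch of how consuming code sees it (default shown; override mechanics depend on the deployment):

```python
from nova import flags

FLAGS = flags.FLAGS
# Once the service has parsed its arguments, FLAGS.xenapi_sr_base_path
# holds the SR mount root: '/var/run/sr-mount' unless overridden with
# --xenapi_sr_base_path on the nova-compute command line.
```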
