diff options
| author | John Garbutt <john@johngarbutt.com> | 2013-03-14 15:41:47 +0000 |
|---|---|---|
| committer | Gerrit Code Review <review@openstack.org> | 2013-05-15 14:13:06 +0000 |
| commit | 72e75dbcea66e6e4e2b0f5a931dcd647b2032ad6 (patch) | |
| tree | 652f3a37e97a713ace7a016566dbcd1ad3be42df /nova/virt | |
| parent | af7823f5e7d4533f35841936a6c6fe1cdece6233 (diff) | |
xenapi: ensure vdi is not too big when resizing down
Fix for bug 1155066
This change adds some rollback into the resize disk down
code within xenapi, and reports a better error message
when the disk is too big to resize down.
On a successful rollback, the user is notified of the error
via the instance actions, rather than the server
being left in the error state.
The user is then able to free up some disk space such that
the resize can work correctly.
Change-Id: Ibad568ab3cfb9caaf4fe002572c8cda973d501a7
Diffstat (limited to 'nova/virt')
| -rwxr-xr-x | nova/virt/xenapi/driver.py | 13 | ||||
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 32 | ||||
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 153 | ||||
| -rw-r--r-- | nova/virt/xenapi/volumeops.py | 4 |
4 files changed, 129 insertions, 73 deletions
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index 02b849a99..5bc1a3049 100755 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -239,17 +239,8 @@ class XenAPIDriver(driver.ComputeDriver): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk""" # NOTE(vish): Xen currently does not use network info. - rv = self._vmops.migrate_disk_and_power_off(context, instance, - dest, instance_type) - block_device_mapping = driver.block_device_info_get_mapping( - block_device_info) - name_label = self._vmops._get_orig_vm_name_label(instance) - for vol in block_device_mapping: - connection_info = vol['connection_info'] - mount_device = vol['mount_device'].rpartition("/")[2] - self._volumeops.detach_volume(connection_info, - name_label, mount_device) - return rv + return self._vmops.migrate_disk_and_power_off(context, instance, + dest, instance_type, block_device_info) def suspend(self, instance): """suspend the specified instance.""" diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4cd2b2ab5..3f2c6835f 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -286,7 +286,7 @@ def destroy_vm(session, instance, vm_ref): def clean_shutdown_vm(session, instance, vm_ref): - if _is_vm_shutdown(session, vm_ref): + if is_vm_shutdown(session, vm_ref): LOG.warn(_("VM already halted, skipping shutdown..."), instance=instance) return False @@ -301,7 +301,7 @@ def clean_shutdown_vm(session, instance, vm_ref): def hard_shutdown_vm(session, instance, vm_ref): - if _is_vm_shutdown(session, vm_ref): + if is_vm_shutdown(session, vm_ref): LOG.warn(_("VM already halted, skipping shutdown..."), instance=instance) return False @@ -315,7 +315,7 @@ def hard_shutdown_vm(session, instance, vm_ref): return True -def _is_vm_shutdown(session, vm_ref): +def is_vm_shutdown(session, vm_ref): vm_rec = session.call_xenapi("VM.get_record", vm_ref) state = 
compile_info(vm_rec)['state'] if state == power_state.SHUTDOWN: @@ -2078,6 +2078,21 @@ def _write_partition(virtual_size, dev): LOG.debug(_('Writing partition table %s done.'), dev_path) +def _get_min_sectors(partition_path, block_size=4096): + stdout, _err = utils.execute('resize2fs', '-P', partition_path, + run_as_root=True) + min_size_blocks = long(re.sub('[^0-9]', '', stdout)) + min_size_bytes = min_size_blocks * block_size + return min_size_bytes / SECTOR_SIZE + + +def _repair_filesystem(partition_path): + # Exit Code 1 = File system errors corrected + # 2 = File system errors corrected, system needs a reboot + utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True, + check_exit_code=[0, 1, 2]) + + def _resize_part_and_fs(dev, start, old_sectors, new_sectors): """Resize partition and fileystem. @@ -2091,10 +2106,7 @@ def _resize_part_and_fs(dev, start, old_sectors, new_sectors): partition_path = utils.make_dev_path(dev, partition=1) # Replay journal if FS wasn't cleanly unmounted - # Exit Code 1 = File system errors corrected - # 2 = File system errors corrected, system needs a reboot - utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True, - check_exit_code=[0, 1, 2]) + _repair_filesystem(partition_path) # Remove ext3 journal (making it ext2) utils.execute('tune2fs', '-O ^has_journal', partition_path, @@ -2102,6 +2114,12 @@ def _resize_part_and_fs(dev, start, old_sectors, new_sectors): if new_sectors < old_sectors: # Resizing down, resize filesystem before partition resize + min_sectors = _get_min_sectors(partition_path) + if min_sectors >= new_sectors: + reason = _('Resize down not allowed because minimum ' + 'filesystem sectors %(min_sectors)d is too big ' + 'for target sectors %(new_sectors)d') + raise exception.ResizeError(reason=(reason % locals())) utils.execute('resize2fs', partition_path, '%ds' % size, run_as_root=True) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1dabe271d..034556809 100644 --- 
a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -209,6 +209,9 @@ class VMOps(object): return nova_uuids def confirm_migration(self, migration, instance, network_info): + self._destroy_orig_vm(instance, network_info) + + def _destroy_orig_vm(self, instance, network_info): name_label = self._get_orig_vm_name_label(instance) vm_ref = vm_utils.lookup(self._session, name_label) return self._destroy(instance, vm_ref, network_info=network_info) @@ -227,6 +230,9 @@ class VMOps(object): hotplug=False) def finish_revert_migration(self, instance, block_device_info=None): + self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info) + + def _restore_orig_vm_and_cleanup_orphan(self, instance, block_device_info): # NOTE(sirp): the original vm was suffixed with '-orig'; find it using # the old suffix, remove the suffix, then power it back on. name_label = self._get_orig_vm_name_label(instance) @@ -797,53 +803,84 @@ class VMOps(object): self._virtapi.instance_update(context, instance['uuid'], {'progress': progress}) + def _resize_ensure_vm_is_shutdown(self, instance, vm_ref): + if vm_utils.is_vm_shutdown(self._session, vm_ref): + LOG.debug(_("VM was already shutdown."), instance=instance) + return + + if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref): + LOG.debug(_("Clean shutdown did not complete successfully, " + "trying hard shutdown."), instance=instance) + if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref): + raise exception.ResizeError( + reason=_("Unable to terminate instance.")) + def _migrate_disk_resizing_down(self, context, instance, dest, instance_type, vm_ref, sr_path): - # 1. 
NOOP since we're not transmitting the base-copy separately - self._update_instance_progress(context, instance, - step=1, - total_steps=RESIZE_TOTAL_STEPS) + if not instance['auto_disk_config']: + reason = _('Resize down not allowed without auto_disk_config') + raise exception.ResizeError(reason=reason) - vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely( - self._session, vm_ref) - vdi_uuid = vm_vdi_rec['uuid'] + step = make_step_decorator(context, instance, + self._virtapi.instance_update) - old_gb = instance['root_gb'] - new_gb = instance_type['root_gb'] - LOG.debug(_("Resizing down VDI %(vdi_uuid)s from " - "%(old_gb)dGB to %(new_gb)dGB"), locals(), - instance=instance) + @step + def fake_step_to_match_resizing_up(): + pass - # 2. Power down the instance before resizing - if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref): - LOG.debug(_("Clean shutdown did not complete successfully, " - "trying hard shutdown."), instance=instance) - vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) - self._update_instance_progress(context, instance, - step=2, - total_steps=RESIZE_TOTAL_STEPS) + @step + def rename_and_power_off_vm(undo_mgr): + self._resize_ensure_vm_is_shutdown(instance, vm_ref) + self._apply_orig_vm_name_label(instance, vm_ref) - # 3. Copy VDI, resize partition and filesystem, forget VDI, - # truncate VHD - new_ref, new_uuid = vm_utils.resize_disk(self._session, - instance, - vdi_ref, - instance_type) - self._update_instance_progress(context, instance, - step=3, - total_steps=RESIZE_TOTAL_STEPS) + def restore_orig_vm(): + # Do not need to restore block devices, not yet been removed + self._restore_orig_vm_and_cleanup_orphan(instance, None) - # 4. 
Transfer the new VHD - self._migrate_vhd(instance, new_uuid, dest, sr_path, 0) - self._update_instance_progress(context, instance, - step=4, - total_steps=RESIZE_TOTAL_STEPS) + undo_mgr.undo_with(restore_orig_vm) + + @step + def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref): + new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session, + instance, old_vdi_ref, instance_type) - # Clean up VDI now that it's been copied - vm_utils.destroy_vdi(self._session, new_ref) + def cleanup_vdi_copy(): + vm_utils.destroy_vdi(self._session, new_vdi_ref) + + undo_mgr.undo_with(cleanup_vdi_copy) + + return new_vdi_ref, new_vdi_uuid + + @step + def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid): + self._migrate_vhd(instance, new_vdi_uuid, dest, sr_path, 0) + # Clean up VDI now that it's been copied + vm_utils.destroy_vdi(self._session, new_vdi_ref) + + @step + def fake_step_to_be_executed_by_finish_migration(): + pass + + undo_mgr = utils.UndoManager() + try: + fake_step_to_match_resizing_up() + rename_and_power_off_vm(undo_mgr) + old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely( + self._session, vm_ref) + new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize( + undo_mgr, old_vdi_ref) + transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid) + except Exception, error: + msg = _("_migrate_disk_resizing_down failed. " + "Restoring orig vm due_to: %{exception}.") + LOG.exception(msg, instance=instance) + undo_mgr._rollback() + raise exception.InstanceFaultRollback(error) def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref, sr_path): + self._apply_orig_vm_name_label(instance, vm_ref) + # 1. Create Snapshot label = "%s-snapshot" % instance['name'] with vm_utils.snapshot_attached_here( @@ -865,10 +902,7 @@ class VMOps(object): total_steps=RESIZE_TOTAL_STEPS) # 3. 
Now power down the instance - if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref): - LOG.debug(_("Clean shutdown did not complete successfully, " - "trying hard shutdown."), instance=instance) - vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) + self._resize_ensure_vm_is_shutdown(instance, vm_ref) self._update_instance_progress(context, instance, step=3, total_steps=RESIZE_TOTAL_STEPS) @@ -882,8 +916,15 @@ class VMOps(object): step=4, total_steps=RESIZE_TOTAL_STEPS) + def _apply_orig_vm_name_label(self, instance, vm_ref): + # NOTE(sirp): in case we're resizing to the same host (for dev + # purposes), apply a suffix to name-label so the two VM records + # extant until a confirm_resize don't collide. + name_label = self._get_orig_vm_name_label(instance) + vm_utils.set_vm_name_label(self._session, vm_ref, name_label) + def migrate_disk_and_power_off(self, context, instance, dest, - instance_type): + instance_type, block_device_info): """Copies a VHD from one host machine to another, possibly resizing filesystem before hand. @@ -891,23 +932,17 @@ class VMOps(object): :param dest: the destination host machine. :param instance_type: instance_type to resize to """ - vm_ref = self._get_vm_opaque_ref(instance) - sr_path = vm_utils.get_sr_path(self._session) - resize_down = instance['root_gb'] > instance_type['root_gb'] - if resize_down and not instance['auto_disk_config']: - reason = _('Resize down not allowed without auto_disk_config') - raise exception.ResizeError(reason=reason) - # 0. Zero out the progress to begin self._update_instance_progress(context, instance, step=0, total_steps=RESIZE_TOTAL_STEPS) - # NOTE(sirp): in case we're resizing to the same host (for dev - # purposes), apply a suffix to name-label so the two VM records - # extant until a confirm_resize don't collide. 
- name_label = self._get_orig_vm_name_label(instance) - vm_utils.set_vm_name_label(self._session, vm_ref, name_label) + vm_ref = self._get_vm_opaque_ref(instance) + sr_path = vm_utils.get_sr_path(self._session) + + old_gb = instance['root_gb'] + new_gb = instance_type['root_gb'] + resize_down = old_gb > new_gb if resize_down: self._migrate_disk_resizing_down( @@ -916,12 +951,24 @@ class VMOps(object): self._migrate_disk_resizing_up( context, instance, dest, vm_ref, sr_path) + self._detach_block_devices_from_orig_vm(instance, block_device_info) + # NOTE(sirp): disk_info isn't used by the xenapi driver, instead it # uses a staging-area (/images/instance<uuid>) and sequence-numbered # VHDs to figure out how to reconstruct the VDI chain after syncing disk_info = {} return disk_info + def _detach_block_devices_from_orig_vm(self, instance, block_device_info): + block_device_mapping = virt_driver.block_device_info_get_mapping( + block_device_info) + name_label = self._get_orig_vm_name_label(instance) + for vol in block_device_mapping: + connection_info = vol['connection_info'] + mount_device = vol['mount_device'].rpartition("/")[2] + self._volumeops.detach_volume(connection_info, name_label, + mount_device) + def _resize_instance(self, instance, root_vdi): """Resize an instances root disk.""" diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 5e650f55d..3560edbc1 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -141,7 +141,7 @@ class VolumeOps(object): return # Unplug VBD if we're NOT shutdown - unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref) + unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref) self._detach_vbd(vbd_ref, unplug=unplug) LOG.info(_('Mountpoint %(mountpoint)s detached from instance' @@ -171,7 +171,7 @@ class VolumeOps(object): # Generally speaking, detach_all will be called with VM already # shutdown; however if it's still running, we can still perform the # operation by 
unplugging the VBD first. - unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref) + unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref) vbd_refs = self._get_all_volume_vbd_refs(vm_ref) for vbd_ref in vbd_refs: |
