author     Jenkins <jenkins@review.openstack.org>    2013-02-10 16:56:34 +0000
committer  Gerrit Code Review <review@openstack.org>    2013-02-10 16:56:34 +0000
commit     dc49288ac03a56e23c3634238d0da099cea351b6
tree       536adc2668e2ed23cca0f75ecd7424183a73fe77
parent     5911e9acb781c51f584d49acc6d0225eb84a5b9b
parent     7c6a9e416231e19443f66426531e5b7967a11bb0
Merge "xenapi: Cleanup detach_volume code"
-rw-r--r--  nova/virt/xenapi/vmops.py     | 24
-rw-r--r--  nova/virt/xenapi/volumeops.py | 73
2 files changed, 42 insertions, 55 deletions
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 9124b4dbe..5fca96817 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1027,28 +1027,6 @@ class VMOps(object):
             raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))

-    def _detach_vm_vols(self, instance, vm_ref):
-        """Detach any external nova/cinder volumes and purge the SRs.
-        This differs from a normal detach in that the VM has been
-        shutdown, so there is no need for unplugging VBDs. They do
-        need to be destroyed, so that the SR can be forgotten.
-        """
-        vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
-        for vbd_ref in vbd_refs:
-            other_config = self._session.call_xenapi("VBD.get_other_config",
-                                                     vbd_ref)
-            if other_config.get('osvol'):
-                # this is a nova/cinder volume
-                try:
-                    sr_ref = volume_utils.find_sr_from_vbd(self._session,
-                                                           vbd_ref)
-                    vm_utils.destroy_vbd(self._session, vbd_ref)
-                    # Forget SR only if not in use
-                    volume_utils.purge_sr(self._session, sr_ref)
-                except Exception as exc:
-                    LOG.exception(exc)
-                    raise
-
     def _destroy_vdis(self, instance, vm_ref):
         """Destroys all VDIs associated with a VM."""
         LOG.debug(_("Destroying VDIs"), instance=instance)
@@ -1155,7 +1133,7 @@ class VMOps(object):
             vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)

         if destroy_disks:
-            self._detach_vm_vols(instance, vm_ref)
+            self._volumeops.detach_all(vm_ref)
             self._destroy_vdis(instance, vm_ref)
             self._destroy_kernel_ramdisk(instance, vm_ref)

diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index c2d717cfd..0c8a9e1c7 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -115,39 +115,48 @@ class VolumeOps(object):
     def detach_volume(self, connection_info, instance_name, mountpoint):
         """Detach volume storage to VM instance."""
-
-        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
-
-        # Detach VBD from VM
         LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
                   % locals())

-        device_number = volume_utils.get_device_number(mountpoint)
-        try:
-            vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
-                                                  device_number)
-            sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
-        except volume_utils.StorageError, exc:
-            LOG.exception(exc)
-            raise Exception(_('Unable to locate volume %s') % mountpoint)
-
-        try:
-            if not vm_utils._is_vm_shutdown(self._session, vm_ref):
-                vm_utils.unplug_vbd(self._session, vbd_ref)
-        except volume_utils.StorageError, exc:
-            LOG.exception(exc)
-            raise Exception(_('Unable to detach volume %s') % mountpoint)
-        try:
-            vm_utils.destroy_vbd(self._session, vbd_ref)
-        except volume_utils.StorageError, exc:
-            LOG.exception(exc)
-            raise Exception(_('Unable to destroy vbd %s') % mountpoint)
-
-        # Forget SR only if no other volumes on this host are using it
-        try:
-            volume_utils.purge_sr(self._session, sr_ref)
-        except volume_utils.StorageError, exc:
-            LOG.exception(exc)
-            raise Exception(_('Error purging SR %s') % sr_ref)
+        device_number = volume_utils.get_device_number(mountpoint)
+        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)

-        LOG.info(_('Mountpoint %(mountpoint)s detached from'
-                   ' instance %(instance_name)s') % locals())
+        vbd_ref = vm_utils.find_vbd_by_number(
+                self._session, vm_ref, device_number)
+
+        # Unplug VBD if we're NOT shutdown
+        unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
+        self._detach_vbd(vbd_ref, unplug=unplug)
+
+        LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
+                   ' %(instance_name)s') % locals())
+
+    def _get_all_volume_vbd_refs(self, vm_ref):
+        """Return VBD refs for all Nova/Cinder volumes."""
+        vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
+        for vbd_ref in vbd_refs:
+            other_config = self._session.call_xenapi(
+                    "VBD.get_other_config", vbd_ref)
+            if other_config.get('osvol'):
+                yield vbd_ref
+
+    def _detach_vbd(self, vbd_ref, unplug=False):
+        if unplug:
+            vm_utils.unplug_vbd(self._session, vbd_ref)
+
+        sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
+        vm_utils.destroy_vbd(self._session, vbd_ref)
+
+        # Forget SR only if not in use
+        volume_utils.purge_sr(self._session, sr_ref)
+
+    def detach_all(self, vm_ref):
+        """Detach any external nova/cinder volumes and purge the SRs."""
+        # Generally speaking, detach_all will be called with VM already
+        # shutdown; however if it's still running, we can still perform the
+        # operation by unplugging the VBD first.
+        unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
+
+        vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
+        for vbd_ref in vbd_refs:
+            self._detach_vbd(vbd_ref, unplug=unplug)
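
The net effect of the merge is that the volume-detach logic moves out of VMOps._detach_vm_vols and into VolumeOps, split across _get_all_volume_vbd_refs, _detach_vbd, and detach_all, with VMOps._destroy now delegating via self._volumeops.detach_all(vm_ref). The sketch below is not part of the commit; it is a minimal, hypothetical mock-based test of the new detach_all path, assuming the module layout at this revision and that VolumeOps is constructed with the XenAPI session as its only argument.

```python
# Hypothetical sketch -- not part of this commit.  Assumes the module layout
# at this revision and that VolumeOps takes the XenAPI session in __init__.
import mock

from nova.virt.xenapi import volumeops


def test_detach_all_purges_only_cinder_volumes():
    session = mock.Mock()
    ops = volumeops.VolumeOps(session)

    # Two VBDs on the VM: only 'vbd_1' carries the 'osvol' other_config
    # marker that identifies a nova/cinder volume.
    xenapi_responses = {
        ('VM.get_VBDs', 'vm_ref'): ['vbd_1', 'vbd_2'],
        ('VBD.get_other_config', 'vbd_1'): {'osvol': 'True'},
        ('VBD.get_other_config', 'vbd_2'): {},
    }
    session.call_xenapi.side_effect = lambda *args: xenapi_responses[args]

    with mock.patch.multiple(volumeops.vm_utils,
                             _is_vm_shutdown=mock.DEFAULT,
                             unplug_vbd=mock.DEFAULT,
                             destroy_vbd=mock.DEFAULT) as vm_mocks:
        with mock.patch.multiple(volumeops.volume_utils,
                                 find_sr_from_vbd=mock.DEFAULT,
                                 purge_sr=mock.DEFAULT) as vol_mocks:
            # VM already shut down, so detach_all should skip the unplug.
            vm_mocks['_is_vm_shutdown'].return_value = True
            vol_mocks['find_sr_from_vbd'].return_value = 'sr_1'

            ops.detach_all('vm_ref')

    # Only the cinder-backed VBD is destroyed and its SR purged; the plain
    # disk VBD is left untouched and nothing is unplugged.
    assert not vm_mocks['unplug_vbd'].called
    vm_mocks['destroy_vbd'].assert_called_once_with(session, 'vbd_1')
    vol_mocks['purge_sr'].assert_called_once_with(session, 'sr_1')
```

Because _get_all_volume_vbd_refs filters on the 'osvol' key in VBD.other_config, only cinder-backed VBDs are destroyed and their SRs purged, which is what the assertions above check.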