summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRick Harris <rconradharris@gmail.com>2013-02-04 22:54:15 +0000
committerRick Harris <rconradharris@gmail.com>2013-02-07 18:48:43 +0000
commit7c6a9e416231e19443f66426531e5b7967a11bb0 (patch)
tree281971bc0630b8c6fc222a7c0f7d043d425ca4e1
parent47bbf12a6c9705e5abca29a1d44b753c8506505d (diff)
downloadnova-7c6a9e416231e19443f66426531e5b7967a11bb0.tar.gz
nova-7c6a9e416231e19443f66426531e5b7967a11bb0.tar.xz
nova-7c6a9e416231e19443f66426531e5b7967a11bb0.zip
xenapi: Cleanup detach_volume code
Refactors `detach_volume` so that it uses the same detach code path that detaching all volumes uses. Change-Id: Idb7acb136367d8ad512d4b14c775c69404f6f6b0
-rw-r--r--nova/virt/xenapi/vmops.py24
-rw-r--r--nova/virt/xenapi/volumeops.py73
2 files changed, 42 insertions, 55 deletions
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 8a76f3368..3148fe801 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1026,28 +1026,6 @@ class VMOps(object):
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
- def _detach_vm_vols(self, instance, vm_ref):
- """Detach any external nova/cinder volumes and purge the SRs.
- This differs from a normal detach in that the VM has been
- shutdown, so there is no need for unplugging VBDs. They do
- need to be destroyed, so that the SR can be forgotten.
- """
- vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
- for vbd_ref in vbd_refs:
- other_config = self._session.call_xenapi("VBD.get_other_config",
- vbd_ref)
- if other_config.get('osvol'):
- # this is a nova/cinder volume
- try:
- sr_ref = volume_utils.find_sr_from_vbd(self._session,
- vbd_ref)
- vm_utils.destroy_vbd(self._session, vbd_ref)
- # Forget SR only if not in use
- volume_utils.purge_sr(self._session, sr_ref)
- except Exception as exc:
- LOG.exception(exc)
- raise
-
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
@@ -1154,7 +1132,7 @@ class VMOps(object):
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
- self._detach_vm_vols(instance, vm_ref)
+ self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index c2d717cfd..0c8a9e1c7 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -115,39 +115,48 @@ class VolumeOps(object):
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance."""
-
- vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
-
- # Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
- device_number = volume_utils.get_device_number(mountpoint)
- try:
- vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
- device_number)
- sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Unable to locate volume %s') % mountpoint)
-
- try:
- if not vm_utils._is_vm_shutdown(self._session, vm_ref):
- vm_utils.unplug_vbd(self._session, vbd_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Unable to detach volume %s') % mountpoint)
- try:
- vm_utils.destroy_vbd(self._session, vbd_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Unable to destroy vbd %s') % mountpoint)
- # Forget SR only if no other volumes on this host are using it
- try:
- volume_utils.purge_sr(self._session, sr_ref)
- except volume_utils.StorageError, exc:
- LOG.exception(exc)
- raise Exception(_('Error purging SR %s') % sr_ref)
+ device_number = volume_utils.get_device_number(mountpoint)
+ vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
- LOG.info(_('Mountpoint %(mountpoint)s detached from'
- ' instance %(instance_name)s') % locals())
+ vbd_ref = vm_utils.find_vbd_by_number(
+ self._session, vm_ref, device_number)
+
+ # Unplug VBD if we're NOT shutdown
+ unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
+ self._detach_vbd(vbd_ref, unplug=unplug)
+
+ LOG.info(_('Mountpoint %(mountpoint)s detached from instance'
+ ' %(instance_name)s') % locals())
+
+ def _get_all_volume_vbd_refs(self, vm_ref):
+ """Return VBD refs for all Nova/Cinder volumes."""
+ vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
+ for vbd_ref in vbd_refs:
+ other_config = self._session.call_xenapi(
+ "VBD.get_other_config", vbd_ref)
+ if other_config.get('osvol'):
+ yield vbd_ref
+
+ def _detach_vbd(self, vbd_ref, unplug=False):
+ if unplug:
+ vm_utils.unplug_vbd(self._session, vbd_ref)
+
+ sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
+ vm_utils.destroy_vbd(self._session, vbd_ref)
+
+ # Forget SR only if not in use
+ volume_utils.purge_sr(self._session, sr_ref)
+
+ def detach_all(self, vm_ref):
+ """Detach any external nova/cinder volumes and purge the SRs."""
+ # Generally speaking, detach_all will be called with VM already
+ # shutdown; however if it's still running, we can still perform the
+ # operation by unplugging the VBD first.
+ unplug = not vm_utils._is_vm_shutdown(self._session, vm_ref)
+
+ vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
+ for vbd_ref in vbd_refs:
+ self._detach_vbd(vbd_ref, unplug=unplug)