Diffstat (limited to 'nova')
-rw-r--r--  nova/virt/xenapi/vm_utils.py   31
-rw-r--r--  nova/virt/xenapi/vmops.py      74
-rw-r--r--  nova/virt/xenapi_conn.py        8
3 files changed, 90 insertions(+), 23 deletions(-)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 65de8e361..4afd28dd8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -22,6 +22,7 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
import os
import pickle
import re
+import time
import urllib
from xml.dom import minidom
@@ -595,6 +596,27 @@ def find_sr(session):
return None
+def remap_vbd_dev(dev):
+ """Return the appropriate location for a plugged-in VBD device
+
+ Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be
+ fixed in future versions:
+ https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875
+
+ For now, we work around it by just doing a string replace.
+ """
+ # NOTE(sirp): This hack can go away when we pull support for Maverick
+ should_remap = FLAGS.xenapi_remap_vbd_dev
+ if not should_remap:
+ return dev
+
+ old_prefix = 'xvd'
+ new_prefix = FLAGS.xenapi_remap_vbd_dev_prefix
+ remapped_dev = dev.replace(old_prefix, new_prefix)
+
+ return remapped_dev
+
+
def with_vdi_attached_here(session, vdi, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
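
The remap itself is a plain string substitution driven by the two new
flags defined in nova/virt/xenapi_conn.py below. A minimal standalone
sketch of the behaviour, with ordinary arguments standing in for
FLAGS.xenapi_remap_vbd_dev and FLAGS.xenapi_remap_vbd_dev_prefix:

    def remap_vbd_dev(dev, remap=True, new_prefix='sd'):
        """Work around Maverick's xvd? -> sd? rename via str.replace."""
        if not remap:
            return dev
        return dev.replace('xvd', new_prefix)

    assert remap_vbd_dev('xvdb') == 'sdb'                # Maverick guest
    assert remap_vbd_dev('xvdb', remap=False) == 'xvdb'  # flag disabled
    assert remap_vbd_dev('sdb') == 'sdb'                 # already sd?, no-op
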
@@ -617,7 +639,13 @@ def with_vdi_attached_here(session, vdi, read_only, f):
LOG.debug(_('Plugging VBD %s ... '), vbd)
session.get_xenapi().VBD.plug(vbd)
LOG.debug(_('Plugging VBD %s done.'), vbd)
- return f(session.get_xenapi().VBD.get_device(vbd))
+ orig_dev = session.get_xenapi().VBD.get_device(vbd)
+ LOG.debug(_('VBD %s plugged as %s'), vbd, orig_dev)
+ dev = remap_vbd_dev(orig_dev)
+ if dev != orig_dev:
+ LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
+ 'remapping to %(dev)s') % locals())
+ return f(dev)
finally:
LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
vbd_unplug_with_retry(session, vbd)
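
For context, with_vdi_attached_here() plugs the VDI into the dom0 VM,
hands the callback the (now possibly remapped) device name, and always
unplugs in the finally block. A hypothetical caller sketching that shape
(_examine_mbr is illustrative, not a function from this commit):

    def _examine_mbr(dev):
        # `dev` is e.g. 'sdb' after remapping; read the first sector
        with open('/dev/%s' % dev, 'rb') as f:
            return f.read(512)

    # mbr = with_vdi_attached_here(session, vdi, True, _examine_mbr)
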
@@ -630,6 +658,7 @@ def vbd_unplug_with_retry(session, vbd):
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
should be dead."""
+ # FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
while True:
try:
session.get_xenapi().VBD.unplug(vbd)
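
The hunk is truncated here, but the retry loop it belongs to (the likely
consumer of the new `import time` above) catches the transient failure,
sleeps, and tries again. A hedged sketch of that pattern (the committed
error handling may differ in detail):

    import time

    def unplug_with_retry(session, vbd):
        while True:
            try:
                session.get_xenapi().VBD.unplug(vbd)
                return
            except Exception, exc:  # XenAPI.Failure in the real code
                if 'DEVICE_DETACH_REJECTED' in str(exc):
                    time.sleep(1)  # the blocking sleep the FIXME targets
                else:
                    raise
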
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 1fdf79ad5..628a171fa 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -255,41 +255,71 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def destroy(self, instance):
- """Destroy VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
- return self._destroy(instance, vm, shutdown=True)
-
- def _destroy(self, instance, vm, shutdown=True):
- """ Destroy VM instance """
- if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ def _shutdown(self, instance, vm):
+ """Shutdown an instance """
+ state = self.get_info(instance['name'])['state']
+ if state == power_state.SHUTDOWN:
+ LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
+ locals())
return
- # Get the VDIs related to the VM
+
+ try:
+ task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ self._session.wait_for_task(instance.id, task)
+ except self.XenAPI.Failure, exc:
+ LOG.exception(exc)
+
+ def _destroy_vdis(self, instance, vm):
+ """Destroys all VDIs associated with a VM """
vdis = VMHelper.lookup_vm_vdis(self._session, vm)
- if shutdown:
+
+ if not vdis:
+ return
+
+ for vdi in vdis:
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- # Disk clean-up
- if vdis:
- for vdi in vdis:
- try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
- except self.XenAPI.Failure, exc:
- LOG.exception(exc)
- # VM Destroy
+
+ def _destroy_vm(self, instance, vm):
+ """Destroys a VM record"""
try:
task = self._session.call_xenapi('Async.VM.destroy', vm)
self._session.wait_for_task(instance.id, task)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+
+ def destroy(self, instance):
+ """
+ Destroy VM instance
+
+ This is the method exposed by xenapi_conn.destroy(). The rest of the
+ _destroy* methods are internal.
+ """
+ vm = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm, shutdown=True)
+
+ def _destroy(self, instance, vm, shutdown=True):
+ """
+ Destroys a VM instance by performing:
+
+ 1. A shutdown if requested
+ 2. Destroying associated VDIs
+ 3. Destroying the actual VM record
+ """
+ if vm is None:
+ # Don't complain, just return. This lets us clean up instances
+ # that have already disappeared from the underlying platform.
+ return
+
+ if shutdown:
+ self._shutdown(instance, vm)
+
+ self._destroy_vdis(instance, vm)
+ self._destroy_vm(instance, vm)
+
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
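
Each of the new helpers (_shutdown, _destroy_vdis, _destroy_vm) repeats
the same fire-and-wait shape: start an async XenAPI task, block until it
completes, and log rather than propagate failures so the rest of the
cleanup still runs. A hypothetical consolidation, shown only to make the
shared pattern explicit (this helper is not part of the commit):

    def _call_and_wait(self, instance, method, *args):
        # e.g. method='Async.VM.hard_shutdown', args=(vm,)
        try:
            task = self._session.call_xenapi(method, *args)
            self._session.wait_for_task(instance.id, task)
        except self.XenAPI.Failure, exc:
            LOG.exception(exc)  # swallowed so _destroy() keeps going
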
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 8abd82336..78f0d14b9 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -109,6 +109,14 @@ flags.DEFINE_string('target_port',
flags.DEFINE_string('iqn_prefix',
'iqn.2010-10.org.openstack',
'IQN Prefix')
+# NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick; when we
+# pull support for it, we should remove this
+flags.DEFINE_bool('xenapi_remap_vbd_dev', False,
+ 'Used to enable the remapping of VBD dev '
+ '(Works around an issue in Ubuntu Maverick)')
+flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd',
+ 'Specify prefix to remap VBD dev to '
+ '(ex. /dev/xvdb -> /dev/sdb)')
def get_connection(_):
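
Both flags default to the old behaviour (no remapping). On an affected
Maverick guest the workaround would be switched on through the usual
flagfile mechanism; illustrative values:

    --xenapi_remap_vbd_dev
    --xenapi_remap_vbd_dev_prefix=sd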