summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohannes Erdfelt <johannes.erdfelt@rackspace.com>2012-04-26 23:25:42 +0000
committerJohannes Erdfelt <johannes.erdfelt@rackspace.com>2012-04-27 14:44:48 +0000
commitdf4c02f6303809e1c2627555c1ee18fff8efc3cc (patch)
tree8625128e3cd01f56f5b07b24bc30d25129329962
parent8972e9544dead61c198037f24eecf0f04558a914 (diff)
Disconnect stale instance VDIs when starting nova-compute
Fixes bug 989306. If nova-compute is killed, or the compute domU is shut down uncleanly, instance VDIs can remain attached to the compute domU. This complicates cleaning up unused VDIs, but can also result in reaching a limit on the number of VBDs for the compute domU, preventing new instances from being built. Change-Id: Ie56e0561aa02178d6718b74a87bbb591fa869007
-rw-r--r--nova/tests/test_xenapi.py17
-rw-r--r--nova/tests/xenapi/stubs.py8
-rw-r--r--nova/virt/xenapi/connection.py10
-rw-r--r--nova/virt/xenapi/fake.py43
-rw-r--r--nova/virt/xenapi/vm_utils.py38
5 files changed, 79 insertions, 37 deletions
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 3e8f6ab74..e513ca0fb 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -238,6 +238,23 @@ class XenAPIVMTestCase(test.TestCase):
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
+ def test_init_host(self):
+ session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
+ vm = vm_utils.get_this_vm_ref(session)
+ # Local root disk
+ vdi0 = xenapi_fake.create_vdi('compute', None)
+ vbd0 = xenapi_fake.create_vbd(vm, vdi0)
+ # Instance VDI
+ vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
+ other_config={'nova_instance_uuid': 'aaaa'})
+ vbd1 = xenapi_fake.create_vbd(vm, vdi1)
+ # Only looks like instance VDI
+ vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
+ vbd2 = xenapi_fake.create_vbd(vm, vdi2)
+
+ self.conn.init_host(None)
+ self.assertEquals(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
+
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index e65101253..8ff6b0c50 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -170,7 +170,7 @@ def stubout_loopingcall_start(stubs):
def _make_fake_vdi():
sr_ref = fake.get_all('SR')[0]
- vdi_ref = fake.create_vdi('', False, sr_ref, False)
+ vdi_ref = fake.create_vdi('', sr_ref)
vdi_rec = fake.get_record('VDI', vdi_ref)
return vdi_rec['uuid']
@@ -232,8 +232,7 @@ class FakeSessionForVMTests(fake.SessionBase):
is_control_domain=False)
sr_ref = "fakesr"
- template_vdi_ref = fake.create_vdi(label, read_only=True,
- sr_ref=sr_ref, sharable=False)
+ template_vdi_ref = fake.create_vdi(label, sr_ref, read_only=True)
template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
return template_vm_ref
@@ -373,8 +372,7 @@ def stub_out_migration_methods(stubs):
@classmethod
def fake_get_vdi(cls, session, vm_ref):
- vdi_ref = fake.create_vdi(name_label='derp', read_only=False,
- sr_ref='herp', sharable=False)
+ vdi_ref = fake.create_vdi('derp', 'herp')
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
return vdi_ref, {'uuid': vdi_rec['uuid'], }
diff --git a/nova/virt/xenapi/connection.py b/nova/virt/xenapi/connection.py
index b020a9672..99dee3488 100644
--- a/nova/virt/xenapi/connection.py
+++ b/nova/virt/xenapi/connection.py
@@ -63,6 +63,7 @@ from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import vmops
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volumeops
@@ -164,11 +165,10 @@ class XenAPIConnection(driver.ComputeDriver):
return self._host_state
def init_host(self, host):
- #FIXME(armando): implement this
- #NOTE(armando): would we need a method
- #to call when shutting down the host?
- #e.g. to do session logout?
- pass
+ try:
+ vm_utils.cleanup_attached_vdis(self._session)
+ except Exception:
+ LOG.exception(_('Failure while cleaning up attached VDIs'))
def list_instances(self):
"""List VM instances"""
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index ccb18ca2a..bc616abd6 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -141,21 +141,24 @@ def destroy_vdi(vdi_ref):
del _db_content['VDI'][vdi_ref]
-def create_vdi(name_label, read_only, sr_ref, sharable):
- return _create_object('VDI',
- {'name_label': name_label,
- 'read_only': read_only,
- 'SR': sr_ref,
- 'type': '',
- 'name_description': '',
- 'sharable': sharable,
- 'other_config': {},
- 'location': '',
- 'xenstore_data': '',
- 'sm_config': {},
- 'physical_utilisation': '123',
- 'managed': True,
- 'VBDs': {}})
+def create_vdi(name_label, sr_ref, **kwargs):
+ vdi_rec = {
+ 'SR': sr_ref,
+ 'read_only': False,
+ 'type': '',
+ 'name_label': name_label,
+ 'name_description': '',
+ 'sharable': False,
+ 'other_config': {},
+ 'location': '',
+ 'xenstore_data': '',
+ 'sm_config': {},
+ 'physical_utilisation': '123',
+ 'managed': True,
+ 'VBDs': {},
+ }
+ vdi_rec.update(kwargs)
+ return _create_object('VDI', vdi_rec)
def create_vbd(vm_ref, vdi_ref):
@@ -277,7 +280,7 @@ def _create_sr(table, obj):
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
host_ref = _db_content['host'].keys()[0]
sr_ref = _create_object(table, obj[2])
- vdi_ref = create_vdi('', False, sr_ref, False)
+ vdi_ref = create_vdi('', sr_ref)
pbd_ref = create_pbd('', host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
@@ -423,7 +426,7 @@ class SessionBase(object):
if vdi_per_lun:
# we need to create a vdi because this introduce
# is likely meant for a single vdi
- vdi_ref = create_vdi('', False, sr_ref, False)
+ vdi_ref = create_vdi('', sr_ref)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
return sr_ref
@@ -470,7 +473,8 @@ class SessionBase(object):
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
- vdi_ref = create_vdi(name_label, read_only, sr_ref, sharable)
+ vdi_ref = create_vdi(name_label, sr_ref, sharable=sharable,
+ read_only=read_only)
return vdi_ref
def VDI_clone(self, _1, vdi_to_clone_ref):
@@ -479,7 +483,8 @@ class SessionBase(object):
read_only = db_ref['read_only']
sr_ref = db_ref['SR']
sharable = db_ref['sharable']
- vdi_ref = create_vdi(name_label, read_only, sr_ref, sharable)
+ vdi_ref = create_vdi(name_label, sr_ref, sharable=sharable,
+ read_only=read_only)
return vdi_ref
def host_compute_free_memory(self, _1, ref):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 2fbfb4105..084005a93 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -319,11 +319,11 @@ class VMHelper(xenapi.HelperBase):
_('Unable to destroy VDI %s') % vdi_ref)
@classmethod
- def create_vdi(cls, session, sr_ref, name_label, name_description,
+ def create_vdi(cls, session, sr_ref, instance, name_description,
virtual_size, read_only=False):
"""Create a VDI record and returns its reference."""
vdi_ref = session.call_xenapi("VDI.create",
- {'name_label': name_label,
+ {'name_label': instance['name'],
'name_description': name_description,
'SR': sr_ref,
'virtual_size': str(virtual_size),
@@ -331,7 +331,7 @@ class VMHelper(xenapi.HelperBase):
'sharable': False,
'read_only': read_only,
'xenstore_data': {},
- 'other_config': {},
+ 'other_config': {'nova_instance_uuid': instance['uuid']},
'sm_config': {},
'tags': []})
LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,'
@@ -466,7 +466,7 @@ class VMHelper(xenapi.HelperBase):
# Create new VDI
vdi_size = instance_type['root_gb'] * 1024 * 1024 * 1024
- new_ref = cls.create_vdi(session, sr_ref, instance.name, 'root',
+ new_ref = cls.create_vdi(session, sr_ref, instance, 'root',
vdi_size)
new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
@@ -525,7 +525,7 @@ class VMHelper(xenapi.HelperBase):
sr_ref = cls.safe_find_sr(session)
ONE_MEG = 1024 * 1024
virtual_size = size_mb * ONE_MEG
- vdi_ref = cls.create_vdi(session, sr_ref, instance.name, name,
+ vdi_ref = cls.create_vdi(session, sr_ref, instance, name,
virtual_size)
try:
@@ -853,9 +853,8 @@ class VMHelper(xenapi.HelperBase):
_("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, "
"max %(max_size)d bytes") % locals())
- name_label = instance.name
- vdi_ref = cls.create_vdi(session, sr_ref, name_label, image_type_str,
- vdi_size)
+ vdi_ref = cls.create_vdi(session, sr_ref, instance, image_type_str,
+ vdi_size)
# From this point we have a VDI on Xen host;
# If anything goes wrong, we need to remember its uuid.
try:
@@ -1459,6 +1458,29 @@ def _wait_for_device(dev):
_('Timeout waiting for device %s to be created') % dev)
+def cleanup_attached_vdis(session):
+ """Unplug any instance VDIs left after an unclean restart"""
+ this_vm_ref = get_this_vm_ref(session)
+
+ vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
+ for vbd_ref in vbd_refs:
+ try:
+ vbd_rec = session.call_xenapi('VBD.get_record', vbd_ref)
+ vdi_rec = session.call_xenapi('VDI.get_record', vbd_rec['VDI'])
+ except session.XenAPI.Failure, e:
+ if e.details[0] != 'HANDLE_INVALID':
+ raise
+ continue
+
+ if 'nova_instance_uuid' in vdi_rec['other_config']:
+ # Belongs to an instance and probably left over after an
+ # unclean restart
+ LOG.info(_('Disconnecting stale VDI %s from compute domU'),
+ vdi_rec['uuid'])
+ VMHelper.unplug_vbd(session, vbd_ref)
+ VMHelper.destroy_vbd(session, vbd_ref)
+
+
@contextlib.contextmanager
def vdi_attached_here(session, vdi_ref, read_only=False):
this_vm_ref = get_this_vm_ref(session)