| author | Renuka Apte <renuka.apte@citrix.com> | 2012-07-30 14:56:24 -0700 |
|---|---|---|
| committer | Renuka Apte <renuka.apte@citrix.com> | 2012-08-20 15:35:20 -0700 |
| commit | cf8b2a2026cf8606de0f9474bd2e1563d662b4ab (patch) | |
| tree | 0f3aca9b05fb5aeb0fe5c664f291f2ce24431f9c /nova | |
| parent | 6eae59d60a8021c8df616f65c1237018ff59d4ad (diff) | |
| download | nova-cf8b2a2026cf8606de0f9474bd2e1563d662b4ab.tar.gz nova-cf8b2a2026cf8606de0f9474bd2e1563d662b4ab.tar.xz nova-cf8b2a2026cf8606de0f9474bd2e1563d662b4ab.zip | |
xenapi: Tag nova volumes during attach_volume
1. Set other_config:'osvol':'True' on VBD during attach.
2. Use this (instead of nodestroys list) to determine which volumes
should not be destroyed on instance terminate.
3. Ensure that the SRs for the attached volumes are forgotten when
instance is terminated.
In the virt/xenapi layer, there is no way to determine which of the
VDIs are nova volumes. This information is required when terminating
an instance, to ensure the volumes are kept intact, and also for other
operations such as migrate.
Fixes bug 1030143.
Change-Id: Id1717e64bc29092ce9ffe13b7c254a3867fe8342
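
The tagging scheme described in the commit message can be inspected with the standard XenAPI Python bindings. The sketch below is illustrative only and not part of this change: it logs into a host, walks a VM's VBDs, and reports which ones carry the `osvol` key that `attach_volume` now writes into `other_config`; the host URL, credentials, and VM name-label are placeholders.

```python
# Illustrative sketch only (not from this patch): list a VM's VBDs and show
# which ones are tagged as nova/cinder volumes via other_config['osvol'].
# The host URL, credentials and VM name-label are placeholder values.
import XenAPI

session = XenAPI.Session('https://xenserver.example.com')
session.xenapi.login_with_password('root', 'password')
try:
    # Assumes a VM named 'instance-00000001' exists on this host.
    vm_ref = session.xenapi.VM.get_by_name_label('instance-00000001')[0]
    for vbd_ref in session.xenapi.VM.get_VBDs(vm_ref):
        device = session.xenapi.VBD.get_device(vbd_ref)
        other_config = session.xenapi.VBD.get_other_config(vbd_ref)
        if other_config.get('osvol'):
            # Tagged at attach time: on terminate, only the VBD is destroyed
            # and the SR forgotten; the volume's VDI is left intact.
            print('%s: attached nova/cinder volume' % device)
        else:
            print('%s: instance-owned disk' % device)
finally:
    session.xenapi.session.logout()
```

A VBD carrying the `osvol` flag is what the new `_detach_vm_vols` path and the reworked `lookup_vm_vdis` use to tell nova/cinder volumes apart from instance-owned disks, replacing the old `nodestroys` bookkeeping.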
Diffstat (limited to 'nova')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | nova/tests/compute/test_compute.py | 35 |
| -rw-r--r-- | nova/virt/xenapi/fake.py | 15 |
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 10 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 38 |
| -rw-r--r-- | nova/virt/xenapi/volumeops.py | 4 |
5 files changed, 84 insertions, 18 deletions
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index b6d775bd2..079547a74 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -365,6 +365,41 @@ class ComputeTestCase(BaseTestCase):
         LOG.info(_("After terminating instances: %s"), instances)
         self.assertEqual(len(instances), 0)

+    def test_run_terminate_with_vol_attached(self):
+        """Make sure it is possible to run and terminate instance with volume
+        attached
+        """
+        instance = jsonutils.to_primitive(self._create_fake_instance())
+
+        self.compute.run_instance(self.context, instance=instance)
+
+        instances = db.instance_get_all(context.get_admin_context())
+        LOG.info(_("Running instances: %s"), instances)
+        self.assertEqual(len(instances), 1)
+
+        def fake_check_attach(*args, **kwargs):
+            pass
+
+        def fake_reserve_volume(*args, **kwargs):
+            pass
+
+        def fake_volume_get(self, context, volume_id):
+            return {'id': volume_id}
+
+        self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
+        self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
+        self.stubs.Set(nova.volume.api.API, 'reserve_volume',
+                       fake_reserve_volume)
+
+        self.compute_api.attach_volume(self.context, instance, 1,
+                                       '/dev/vdc')
+
+        self.compute.terminate_instance(self.context, instance=instance)
+
+        instances = db.instance_get_all(context.get_admin_context())
+        LOG.info(_("After terminating instances: %s"), instances)
+        self.assertEqual(len(instances), 0)
+
     def test_terminate_no_network(self):
         # This is as reported in LP bug 1008875
         instance = jsonutils.to_primitive(self._create_fake_instance())
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1f8e400c8..0014b0ee2 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -412,6 +412,21 @@ class SessionBase(object):
         rec['currently_attached'] = False
         rec['device'] = ''

+    def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
+        db_ref = _db_content['VBD'][vbd_ref]
+        if not 'other_config' in db_ref:
+            db_ref['other_config'] = {}
+        if key in db_ref['other_config']:
+            raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
+                           vbd_ref, key])
+        db_ref['other_config'][key] = value
+
+    def VBD_get_other_config(self, _1, vbd_ref):
+        db_ref = _db_content['VBD'][vbd_ref]
+        if not 'other_config' in db_ref:
+            return {}
+        return db_ref['other_config']
+
     def PBD_create(self, _1, pbd_rec):
         pbd_ref = _create_object('PBD', pbd_rec)
         _db_content['PBD'][pbd_ref]['currently_attached'] = False
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index ac936d9c4..8fc7f966f 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1246,7 +1246,7 @@ def list_vms(session):
         yield vm_ref, vm_rec


-def lookup_vm_vdis(session, vm_ref, nodestroys=None):
+def lookup_vm_vdis(session, vm_ref):
     """Look for the VDIs that are attached to the VM"""
     # Firstly we get the VBDs, then the VDIs.
     # TODO(Armando): do we leave the read-only devices?
@@ -1259,11 +1259,13 @@ def lookup_vm_vdis(session, vm_ref, nodestroys=None):
                 # Test valid VDI
                 record = session.call_xenapi("VDI.get_record", vdi_ref)
                 LOG.debug(_('VDI %s is still available'), record['uuid'])
+                vbd_other_config = session.call_xenapi("VBD.get_other_config",
+                                                       vbd_ref)
+                if not vbd_other_config.get('osvol'):
+                    # This is not an attached volume
+                    vdi_refs.append(vdi_ref)
             except session.XenAPI.Failure, exc:
                 LOG.exception(exc)
-            else:
-                if not nodestroys or record['uuid'] not in nodestroys:
-                    vdi_refs.append(vdi_ref)
     return vdi_refs


diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 41e1f6dd6..0b49bff39 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -900,28 +900,37 @@ class VMOps(object):

         raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))

+    def _detach_vm_vols(self, instance, vm_ref, block_device_info=None):
+        """Detach any external nova/cinder volumes and purge the SRs.
+        This differs from a normal detach in that the VM has been
+        shutdown, so there is no need for unplugging VBDs. They do
+        need to be destroyed, so that the SR can be forgotten.
+        """
+        vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
+        for vbd_ref in vbd_refs:
+            other_config = self._session.call_xenapi("VBD.get_other_config",
+                                                     vbd_ref)
+            if other_config.get('osvol'):
+                # this is a nova/cinder volume
+                try:
+                    sr_ref = volume_utils.find_sr_from_vbd(self._session,
+                                                           vbd_ref)
+                    vm_utils.destroy_vbd(self._session, vbd_ref)
+                    # Forget SR only if not in use
+                    volume_utils.purge_sr(self._session, sr_ref)
+                except Exception as exc:
+                    LOG.exception(exc)
+                    raise
+
     def _destroy_vdis(self, instance, vm_ref, block_device_info=None):
         """Destroys all VDIs associated with a VM."""
         instance_uuid = instance['uuid']
         LOG.debug(_("Destroying VDIs for Instance %(instance_uuid)s")
                   % locals())

-        nodestroy = []
-        if block_device_info:
-            for bdm in block_device_info['block_device_mapping']:
-                LOG.debug(bdm)
-                # If there is no associated VDI, skip it
-                if 'vdi_uuid' not in bdm['connection_info']['data']:
-                    LOG.debug(_("BDM contains no vdi_uuid"), instance=instance)
-                    continue
-                # bdm vols should be left alone if delete_on_termination
-                # is false, or they will be destroyed on cleanup_volumes
-                nodestroy.append(bdm['connection_info']['data']['vdi_uuid'])
-
-        vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref, nodestroy)
+        vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
         if not vdi_refs:
             return
-
         for vdi_ref in vdi_refs:
             try:
                 vm_utils.destroy_vdi(self._session, vdi_ref)
@@ -1017,6 +1026,7 @@ class VMOps(object):
         vm_utils.shutdown_vm(self._session, instance, vm_ref)

         # Destroy VDIs
+        self._detach_vm_vols(instance, vm_ref, block_device_info)
         self._destroy_vdis(instance, vm_ref, block_device_info)
         self._destroy_kernel_ramdisk(instance, vm_ref)

diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 99fb968d2..ec495ebc1 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -184,6 +184,10 @@ class VolumeOps(object):

         try:
             self._session.call_xenapi("VBD.plug", vbd_ref)
+            # set osvol=True in other-config to indicate this is an
+            # attached nova (or cinder) volume
+            self._session.call_xenapi("VBD.add_to_other_config",
+                                      vbd_ref, 'osvol', "True")
         except self._session.XenAPI.Failure, exc:
             LOG.exception(exc)
             self.forget_sr(uuid)
