| field | value | date |
|---|---|---|
| author | Jenkins <jenkins@review.openstack.org> | 2012-08-22 21:10:42 +0000 |
| committer | Gerrit Code Review <review@openstack.org> | 2012-08-22 21:10:42 +0000 |
| commit | 48bc20fcbf57870b150977b001ab413aaff3dc51 (patch) | |
| tree | a945ea28c34dbe654a0bdb7fb068fceaae57d810 | |
| parent | 9478eb9af548f0100039c7f994e1799de57de71e (diff) | |
| parent | cf8b2a2026cf8606de0f9474bd2e1563d662b4ab (diff) | |
Merge "xenapi: Tag nova volumes during attach_volume"
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | nova/tests/compute/test_compute.py | 35 |
| -rw-r--r-- | nova/virt/xenapi/fake.py | 15 |
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 10 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 38 |
| -rw-r--r-- | nova/virt/xenapi/volumeops.py | 4 |
5 files changed, 84 insertions, 18 deletions
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 3812a6e96..79ef9beaf 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -454,6 +454,41 @@ class ComputeTestCase(BaseTestCase):
         LOG.info(_("After terminating instances: %s"), instances)
         self.assertEqual(len(instances), 0)
 
+    def test_run_terminate_with_vol_attached(self):
+        """Make sure it is possible to run and terminate instance with volume
+        attached
+        """
+        instance = jsonutils.to_primitive(self._create_fake_instance())
+
+        self.compute.run_instance(self.context, instance=instance)
+
+        instances = db.instance_get_all(context.get_admin_context())
+        LOG.info(_("Running instances: %s"), instances)
+        self.assertEqual(len(instances), 1)
+
+        def fake_check_attach(*args, **kwargs):
+            pass
+
+        def fake_reserve_volume(*args, **kwargs):
+            pass
+
+        def fake_volume_get(self, context, volume_id):
+            return {'id': volume_id}
+
+        self.stubs.Set(nova.volume.api.API, 'get', fake_volume_get)
+        self.stubs.Set(nova.volume.api.API, 'check_attach', fake_check_attach)
+        self.stubs.Set(nova.volume.api.API, 'reserve_volume',
+                       fake_reserve_volume)
+
+        self.compute_api.attach_volume(self.context, instance, 1,
+                                       '/dev/vdc')
+
+        self.compute.terminate_instance(self.context, instance=instance)
+
+        instances = db.instance_get_all(context.get_admin_context())
+        LOG.info(_("After terminating instances: %s"), instances)
+        self.assertEqual(len(instances), 0)
+
     def test_terminate_no_network(self):
         # This is as reported in LP bug 1008875
         instance = jsonutils.to_primitive(self._create_fake_instance())
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 7cb6b5f59..d6b523225 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -414,6 +414,21 @@ class SessionBase(object):
         rec['currently_attached'] = False
         rec['device'] = ''
 
+    def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
+        db_ref = _db_content['VBD'][vbd_ref]
+        if not 'other_config' in db_ref:
+            db_ref['other_config'] = {}
+        if key in db_ref['other_config']:
+            raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
+                           vbd_ref, key])
+        db_ref['other_config'][key] = value
+
+    def VBD_get_other_config(self, _1, vbd_ref):
+        db_ref = _db_content['VBD'][vbd_ref]
+        if not 'other_config' in db_ref:
+            return {}
+        return db_ref['other_config']
+
     def PBD_create(self, _1, pbd_rec):
         pbd_ref = _create_object('PBD', pbd_rec)
         _db_content['PBD'][pbd_ref]['currently_attached'] = False
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index c5cf81780..744770528 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -1246,7 +1246,7 @@ def list_vms(session):
         yield vm_ref, vm_rec
 
 
-def lookup_vm_vdis(session, vm_ref, nodestroys=None):
+def lookup_vm_vdis(session, vm_ref):
     """Look for the VDIs that are attached to the VM"""
     # Firstly we get the VBDs, then the VDIs.
     # TODO(Armando): do we leave the read-only devices?
@@ -1259,11 +1259,13 @@ def lookup_vm_vdis(session, vm_ref, nodestroys=None):
                 # Test valid VDI
                 record = session.call_xenapi("VDI.get_record", vdi_ref)
                 LOG.debug(_('VDI %s is still available'), record['uuid'])
+                vbd_other_config = session.call_xenapi("VBD.get_other_config",
+                                                       vbd_ref)
+                if not vbd_other_config.get('osvol'):
+                    # This is not an attached volume
+                    vdi_refs.append(vdi_ref)
             except session.XenAPI.Failure, exc:
                 LOG.exception(exc)
-            else:
-                if not nodestroys or record['uuid'] not in nodestroys:
-                    vdi_refs.append(vdi_ref)
     return vdi_refs
 
 
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 41e1f6dd6..0b49bff39 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -900,28 +900,37 @@ class VMOps(object):
             raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
 
+    def _detach_vm_vols(self, instance, vm_ref, block_device_info=None):
+        """Detach any external nova/cinder volumes and purge the SRs.
+        This differs from a normal detach in that the VM has been
+        shutdown, so there is no need for unplugging VBDs. They do
+        need to be destroyed, so that the SR can be forgotten.
+        """
+        vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
+        for vbd_ref in vbd_refs:
+            other_config = self._session.call_xenapi("VBD.get_other_config",
+                                                     vbd_ref)
+            if other_config.get('osvol'):
+                # this is a nova/cinder volume
+                try:
+                    sr_ref = volume_utils.find_sr_from_vbd(self._session,
+                                                           vbd_ref)
+                    vm_utils.destroy_vbd(self._session, vbd_ref)
+                    # Forget SR only if not in use
+                    volume_utils.purge_sr(self._session, sr_ref)
+                except Exception as exc:
+                    LOG.exception(exc)
+                    raise
+
     def _destroy_vdis(self, instance, vm_ref, block_device_info=None):
         """Destroys all VDIs associated with a VM."""
         instance_uuid = instance['uuid']
         LOG.debug(_("Destroying VDIs for Instance %(instance_uuid)s")
                   % locals())
-        nodestroy = []
-        if block_device_info:
-            for bdm in block_device_info['block_device_mapping']:
-                LOG.debug(bdm)
-                # If there is no associated VDI, skip it
-                if 'vdi_uuid' not in bdm['connection_info']['data']:
-                    LOG.debug(_("BDM contains no vdi_uuid"), instance=instance)
-                    continue
-                # bdm vols should be left alone if delete_on_termination
-                # is false, or they will be destroyed on cleanup_volumes
-                nodestroy.append(bdm['connection_info']['data']['vdi_uuid'])
-
-        vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref, nodestroy)
+        vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
         if not vdi_refs:
             return
-
         for vdi_ref in vdi_refs:
             try:
                 vm_utils.destroy_vdi(self._session, vdi_ref)
@@ -1017,6 +1026,7 @@ class VMOps(object):
             vm_utils.shutdown_vm(self._session, instance, vm_ref)
 
             # Destroy VDIs
+            self._detach_vm_vols(instance, vm_ref, block_device_info)
             self._destroy_vdis(instance, vm_ref, block_device_info)
 
             self._destroy_kernel_ramdisk(instance, vm_ref)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 99fb968d2..ec495ebc1 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -184,6 +184,10 @@ class VolumeOps(object):
 
         try:
             self._session.call_xenapi("VBD.plug", vbd_ref)
+            # set osvol=True in other-config to indicate this is an
+            # attached nova (or cinder) volume
+            self._session.call_xenapi("VBD.add_to_other_config",
+                                      vbd_ref, 'osvol', "True")
         except self._session.XenAPI.Failure, exc:
             LOG.exception(exc)
             self.forget_sr(uuid)
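For readers skimming the merge, the mechanism is small: when volumeops attaches a nova/cinder volume it sets an `osvol` flag in the new VBD's XenAPI `other_config` map, and the teardown paths (`vm_utils.lookup_vm_vdis` and the new `VMOps._detach_vm_vols`) use that flag to tell attached volumes apart from the instance's own disks. The snippet below is a minimal, self-contained sketch of that tag-and-filter pattern; `StubSession`, its in-memory `vbds` dict, and the helper names are illustrative stand-ins for the real XenAPI session and are not part of the patch.

```python
# Minimal sketch of the tag-and-filter pattern used by this change.
# StubSession is a hypothetical in-memory stand-in for a XenAPI session;
# only the VBD calls exercised by the patch are modelled.

class StubSession(object):
    def __init__(self):
        # vbd_ref -> {'VDI': vdi_ref, 'other_config': {...}}
        self.vbds = {}

    def call_xenapi(self, method, *args):
        if method == "VBD.add_to_other_config":
            vbd_ref, key, value = args
            self.vbds[vbd_ref].setdefault('other_config', {})[key] = value
        elif method == "VBD.get_other_config":
            (vbd_ref,) = args
            return self.vbds[vbd_ref].get('other_config', {})
        elif method == "VBD.get_VDI":
            (vbd_ref,) = args
            return self.vbds[vbd_ref]['VDI']
        else:
            raise NotImplementedError(method)


def tag_volume_vbd(session, vbd_ref):
    # Attach path: mark the VBD so later teardown can tell it apart
    # from the instance's own disks (mirrors the volumeops hunk).
    session.call_xenapi("VBD.add_to_other_config", vbd_ref, 'osvol', "True")


def local_vdis(session, vbd_refs):
    # Destroy path: keep only VDIs whose VBD is *not* tagged as a volume
    # (mirrors the filtering added to vm_utils.lookup_vm_vdis).
    vdi_refs = []
    for vbd_ref in vbd_refs:
        other_config = session.call_xenapi("VBD.get_other_config", vbd_ref)
        if not other_config.get('osvol'):
            vdi_refs.append(session.call_xenapi("VBD.get_VDI", vbd_ref))
    return vdi_refs


if __name__ == '__main__':
    session = StubSession()
    session.vbds = {'vbd-root': {'VDI': 'vdi-root'},
                    'vbd-vol': {'VDI': 'vdi-vol'}}
    tag_volume_vbd(session, 'vbd-vol')
    # Only the root disk's VDI is returned for destruction; the tagged
    # volume is left for the volume-specific detach path.
    assert local_vdis(session, ['vbd-root', 'vbd-vol']) == ['vdi-root']
```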
