author    | Oleg Bondarev <obondarev@mirantis.com> | 2013-04-26 12:52:26 +0400
committer | Mark McLoughlin <markmc@redhat.com> | 2013-05-14 09:55:23 +0100
commit    | 586e752e69ca891714f390bf59ad30d5081d4498 (patch)
tree      | 39a29f9cbd6e73b57893ab575c5275cd46f3915c
parent    | 28f0b01717f17ecc545a25f6deb0aa240e5aaf0d (diff)
Refactor nova.volume.cinder.API to reduce roundtrips with Cinder
Make cinder.API methods accept a volume_id instead of the whole volume object.
This removes the redundant roundtrip that fetched a volume from Cinder only to
pass it to methods which in fact need just its id.
Fixes bug 1172297
Change-Id: I5e7f944c1c29b2f211ece2ef86c0959c81e806df
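
To illustrate the new calling convention (a minimal sketch, not code from the
patch; volume_api, ctx and volume_id stand in for a cinder.API instance, a
request context and a volume UUID):

    # Before: every caller paid an extra GET against Cinder just to build
    # a volume dict that the next call immediately reduced back to an id.
    volume = volume_api.get(ctx, volume_id)
    volume_api.detach(ctx, volume)

    # After: the id is passed straight through; one roundtrip saved.
    volume_api.detach(ctx, volume_id)

The same one-argument pattern applies to delete, reserve_volume,
terminate_connection and the other methods touched in the diff below.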
-rw-r--r-- | nova/api/ec2/cloud.py                                    | 10
-rw-r--r-- | nova/api/openstack/compute/contrib/volumes.py            | 11
-rw-r--r-- | nova/compute/api.py                                      | 13
-rwxr-xr-x | nova/compute/manager.py                                  | 51
-rw-r--r-- | nova/tests/api/ec2/test_cinder_cloud.py                  | 10
-rw-r--r-- | nova/tests/api/openstack/compute/contrib/test_volumes.py | 4
-rw-r--r-- | nova/tests/api/openstack/compute/test_server_actions.py  | 2
-rw-r--r-- | nova/tests/api/openstack/fakes.py                        | 10
-rw-r--r-- | nova/tests/compute/test_compute.py                       | 14
-rw-r--r-- | nova/tests/fake_volume.py                                | 54
-rw-r--r-- | nova/tests/integrated/test_api_samples.py                | 1
-rw-r--r-- | nova/tests/volume/test_cinder.py                         | 24
-rw-r--r-- | nova/volume/cinder.py                                    | 158
13 files changed, 182 insertions, 180 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 3d210404c..33ffbbc21 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -391,8 +391,8 @@ class CloudController(object):
         LOG.audit(_("Create snapshot of volume %s"), volume_id,
                   context=context)
         volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
-        volume = self.volume_api.get(context, volume_id)
-        args = (context, volume, kwargs.get('name'), kwargs.get('description'))
+        args = (context, volume_id, kwargs.get('name'),
+                kwargs.get('description'))
         if kwargs.get('force', False):
             snapshot = self.volume_api.create_snapshot_force(*args)
         else:
@@ -403,8 +403,7 @@ class CloudController(object):

     def delete_snapshot(self, context, snapshot_id, **kwargs):
         snapshot_id = ec2utils.ec2_snap_id_to_uuid(snapshot_id)
-        snapshot = self.volume_api.get_snapshot(context, snapshot_id)
-        self.volume_api.delete_snapshot(context, snapshot)
+        self.volume_api.delete_snapshot(context, snapshot_id)
         return True

     def describe_key_pairs(self, context, key_name=None, **kwargs):
@@ -863,8 +862,7 @@ class CloudController(object):
         validate_ec2_id(volume_id)
         volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
         try:
-            volume = self.volume_api.get(context, volume_id)
-            self.volume_api.delete(context, volume)
+            self.volume_api.delete(context, volume_id)
         except exception.InvalidVolume:
             raise exception.EC2APIError(_('Delete Failed'))

diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 640ac0c76..6b598c6eb 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -187,8 +187,7 @@ class VolumeController(wsgi.Controller):
         LOG.audit(_("Delete volume with id: %s"), id, context=context)

         try:
-            vol = self.volume_api.get(context, id)
-            self.volume_api.delete(context, vol)
+            self.volume_api.delete(context, id)
         except exception.NotFound:
             raise exc.HTTPNotFound()
         return webob.Response(status_int=202)
@@ -573,8 +572,7 @@ class SnapshotController(wsgi.Controller):
         LOG.audit(_("Delete snapshot with id: %s"), id, context=context)

         try:
-            snapshot = self.volume_api.get_snapshot(context, id)
-            self.volume_api.delete_snapshot(context, snapshot)
+            self.volume_api.delete_snapshot(context, id)
         except exception.NotFound:
             return exc.HTTPNotFound()
         return webob.Response(status_int=202)
@@ -610,7 +608,6 @@ class SnapshotController(wsgi.Controller):

         snapshot = body['snapshot']
         volume_id = snapshot['volume_id']
-        vol = self.volume_api.get(context, volume_id)
         force = snapshot.get('force', False)

         LOG.audit(_("Create snapshot from volume %s"), volume_id,
@@ -622,12 +619,12 @@ class SnapshotController(wsgi.Controller):

         if utils.bool_from_str(force):
             new_snapshot = self.volume_api.create_snapshot_force(context,
-                                        vol,
+                                        volume_id,
                                         snapshot.get('display_name'),
                                         snapshot.get('display_description'))
         else:
             new_snapshot = self.volume_api.create_snapshot(context,
-                                        vol,
+                                        volume_id,
                                         snapshot.get('display_name'),
                                         snapshot.get('display_description'))

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 9d73c3055..1d392c7b5 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1182,18 +1182,17 @@ class API(base.Base):
             # cleanup volumes
             for bdm in bdms:
                 if bdm['volume_id']:
-                    volume = self.volume_api.get(context, bdm['volume_id'])
                     # NOTE(vish): We don't have access to correct volume
                     #             connector info, so just pass a fake
                     #             connector. This can be improved when we
                     #             expose get_volume_connector to rpc.
                     connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
                     self.volume_api.terminate_connection(context,
-                                                         volume,
+                                                         bdm['volume_id'],
                                                          connector)
-                    self.volume_api.detach(elevated, volume)
+                    self.volume_api.detach(elevated, bdm['volume_id'])
                     if bdm['delete_on_termination']:
-                        self.volume_api.delete(context, volume)
+                        self.volume_api.delete(context, bdm['volume_id'])
                 self.db.block_device_mapping_destroy(context, bdm['id'])
             instance = self._instance_update(context, instance_uuid,
@@ -1613,7 +1612,7 @@ class API(base.Base):
                 # short time, it doesn't matter for now.
                 name = _('snapshot for %s') % image_meta['name']
                 snapshot = self.volume_api.create_snapshot_force(
-                    context, volume, name, volume['display_description'])
+                    context, volume['id'], name, volume['display_description'])
                 bdm['snapshot_id'] = snapshot['id']
                 del bdm['volume_id']
@@ -2306,7 +2305,7 @@ class API(base.Base):
         try:
             volume = self.volume_api.get(context, volume_id)
             self.volume_api.check_attach(context, volume, instance=instance)
-            self.volume_api.reserve_volume(context, volume)
+            self.volume_api.reserve_volume(context, volume_id)
             self.compute_rpcapi.attach_volume(context, instance=instance,
                     volume_id=volume_id, mountpoint=device)
         except Exception:
@@ -2321,7 +2320,7 @@ class API(base.Base):
         it easier for cells version to override.
         """
         self.volume_api.check_detach(context, volume)
-        self.volume_api.begin_detaching(context, volume)
+        self.volume_api.begin_detaching(context, volume['id'])
         self.compute_rpcapi.detach_volume(context, instance=instance,
                 volume_id=volume['id'])

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 16ea63f9f..4f32e5132 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -793,7 +793,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 if bdm['volume_id'] is not None:
                     volume = self.volume_api.get(context, bdm['volume_id'])
                     self.volume_api.check_attach(context, volume,
-                                                 instance=instance)
+                                                 instance=instance)
                     cinfo = self._attach_volume_boot(context,
                                                      instance,
                                                      volume,
@@ -1316,12 +1316,11 @@ class ComputeManager(manager.SchedulerDependentManager):
             try:
                 # NOTE(vish): actual driver detach done in driver.destroy, so
                 #             just tell nova-volume that we are done with it.
-                volume = self.volume_api.get(context, bdm['volume_id'])
                 connector = self.driver.get_volume_connector(instance)
                 self.volume_api.terminate_connection(context,
-                                                     volume,
+                                                     bdm['volume_id'],
                                                      connector)
-                self.volume_api.detach(context, volume)
+                self.volume_api.detach(context, bdm['volume_id'])
             except exception.DiskNotFound as exc:
                 LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
                          instance=instance)
@@ -1336,8 +1335,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             LOG.debug(_("terminating bdm %s") % bdm,
                       instance_uuid=instance_uuid)
             if bdm['volume_id'] and bdm['delete_on_termination']:
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.delete(context, volume)
+                self.volume_api.delete(context, bdm['volume_id'])
             # NOTE(vish): bdms will be deleted on instance destroy

     @hooks.add_hook("delete_instance")
@@ -1652,8 +1650,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # NOTE(sirp): this detach is necessary b/c we will reattach the
         # volumes in _prep_block_devices below.
         for bdm in self._get_volume_bdms(bdms):
-            volume = self.volume_api.get(context, bdm['volume_id'])
-            self.volume_api.detach(context, volume)
+            self.volume_api.detach(context, bdm['volume_id'])

         if not recreate:
             block_device_info = self._get_volume_block_device_info(
@@ -1726,7 +1723,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             LOG.info(_("Detaching from volume api: %s") % volume_id)
             volume = self.volume_api.get(context, volume_id)
             self.volume_api.check_detach(context, volume)
-            self.volume_api.begin_detaching(context, volume)
+            self.volume_api.begin_detaching(context, volume_id)

             # Manager-detach
             self.detach_volume(context, volume_id, instance)
@@ -2209,9 +2206,8 @@ class ComputeManager(manager.SchedulerDependentManager):

         connector = self.driver.get_volume_connector(instance)
         for bdm in bdms:
-            volume = self.volume_api.get(context, bdm['volume_id'])
             cinfo = self.volume_api.initialize_connection(
-                    context, volume, connector)
+                    context, bdm['volume_id'], connector)
             self.conductor_api.block_device_mapping_update(
                 context, bdm['id'],
@@ -2473,9 +2469,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         if bdms:
             connector = self.driver.get_volume_connector(instance)
             for bdm in bdms:
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.terminate_connection(context, volume,
-                                                     connector)
+                self.volume_api.terminate_connection(context, bdm['volume_id'],
+                                                     connector)

     def _finish_resize(self, context, instance, migration, disk_info,
                        image):
@@ -2880,9 +2875,9 @@ class ComputeManager(manager.SchedulerDependentManager):
                   locals(), context=context, instance=instance)
         connector = self.driver.get_volume_connector(instance)
         connection_info = self.volume_api.initialize_connection(context,
-                                                                volume,
+                                                                volume_id,
                                                                 connector)
-        self.volume_api.attach(context, volume, instance_uuid, mountpoint)
+        self.volume_api.attach(context, volume_id, instance_uuid, mountpoint)
         return connection_info

     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -2925,14 +2920,13 @@ class ComputeManager(manager.SchedulerDependentManager):
                     context, instance, mountpoint)

     def _attach_volume(self, context, volume_id, mountpoint, instance):
-        volume = self.volume_api.get(context, volume_id)
         context = context.elevated()
         LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
                   locals(), context=context, instance=instance)
         try:
             connector = self.driver.get_volume_connector(instance)
             connection_info = self.volume_api.initialize_connection(context,
-                                                                    volume,
+                                                                    volume_id,
                                                                     connector)
         except Exception:  # pylint: disable=W0702
             with excutils.save_and_reraise_exception():
@@ -2940,7 +2934,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 msg = _("Failed to connect to volume %(volume_id)s "
                         "while attaching at %(mountpoint)s")
                 LOG.exception(msg % locals(), context=context,
                               instance=instance)
-                self.volume_api.unreserve_volume(context, volume)
+                self.volume_api.unreserve_volume(context, volume_id)

         if 'serial' not in connection_info:
             connection_info['serial'] = volume_id
@@ -2956,11 +2950,11 @@ class ComputeManager(manager.SchedulerDependentManager):
                 LOG.exception(msg % locals(), context=context,
                               instance=instance)
                 self.volume_api.terminate_connection(context,
-                                                     volume,
+                                                     volume_id,
                                                      connector)

         self.volume_api.attach(context,
-                               volume,
+                               volume_id,
                                instance['uuid'],
                                mountpoint)
         values = {
@@ -3001,8 +2995,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 msg = _("Failed to detach volume %(volume_id)s from %(mp)s")
                 LOG.exception(msg % locals(), context=context,
                               instance=instance)
-                volume = self.volume_api.get(context, volume_id)
-                self.volume_api.roll_detaching(context, volume)
+                self.volume_api.roll_detaching(context, volume_id)

     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
@@ -3031,10 +3024,9 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                   update_totals=True)
         self._detach_volume(context, instance, bdm)
-        volume = self.volume_api.get(context, volume_id)
         connector = self.driver.get_volume_connector(instance)
-        self.volume_api.terminate_connection(context, volume, connector)
-        self.volume_api.detach(context.elevated(), volume)
+        self.volume_api.terminate_connection(context, volume_id, connector)
+        self.volume_api.detach(context.elevated(), volume_id)
         self.conductor_api.block_device_mapping_destroy_by_instance_and_volume(
             context, instance, volume_id)
@@ -3047,9 +3039,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         try:
             bdm = self._get_instance_volume_bdm(context, instance, volume_id)
             self._detach_volume(context, instance, bdm)
-            volume = self.volume_api.get(context, volume_id)
             connector = self.driver.get_volume_connector(instance)
-            self.volume_api.terminate_connection(context, volume, connector)
+            self.volume_api.terminate_connection(context, volume_id, connector)
         except exception.NotFound:
             pass
@@ -3271,8 +3262,8 @@ class ComputeManager(manager.SchedulerDependentManager):

             # remove the volume connection without detaching from hypervisor
             # because the instance is not running anymore on the current host
-            volume = self.volume_api.get(ctxt, bdm['volume_id'])
-            self.volume_api.terminate_connection(ctxt, volume, connector)
+            self.volume_api.terminate_connection(ctxt, bdm['volume_id'],
+                                                 connector)

         # Releasing vlan.
         # (not necessary in current implementation?)
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index ffb2b4948..89d32a49d 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -371,12 +371,12 @@ class CinderCloudTestCase(test.TestCase):
                                              **kwargs)
             if 'snapshot_id' in values:
                 self.volume_api.create_snapshot(self.context,
-                                                vol,
+                                                vol['id'],
                                                 'snapshot-bdm',
                                                 'fake snap for bdm tests',
                                                 values['snapshot_id'])
-            self.volume_api.attach(self.context, vol,
+            self.volume_api.attach(self.context, vol['id'],
                                    instance_uuid, bdm['device_name'])
             volumes.append(vol)
         return volumes
@@ -441,7 +441,7 @@ class CinderCloudTestCase(test.TestCase):

     def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
         for vol in volumes:
-            self.volume_api.delete(self.context, vol)
+            self.volume_api.delete(self.context, vol['id'])
         for uuid in (inst1['uuid'], inst2['uuid']):
             for bdm in db.block_device_mapping_get_all_by_instance(
                     self.context, uuid):
@@ -746,10 +746,10 @@ class CinderCloudTestCase(test.TestCase):
             self.assertTrue(str(vol['id']) == str(vol1_uuid) or
                             str(vol['id']) == str(vol2_uuid))
             if str(vol['id']) == str(vol1_uuid):
-                self.volume_api.attach(self.context, vol,
+                self.volume_api.attach(self.context, vol['id'],
                                        instance_uuid, '/dev/sdb')
             elif str(vol['id']) == str(vol2_uuid):
-                self.volume_api.attach(self.context, vol,
+                self.volume_api.attach(self.context, vol['id'],
                                        instance_uuid, '/dev/sdc')

         vol = self.volume_api.get(self.context, vol1_uuid)
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index 83372b251..3ab0d379e 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -199,7 +199,7 @@ class VolumeApiTest(test.TestCase):
         self.assertEqual(resp.status_int, 200)

     def test_volume_show_no_volume(self):
-        self.stubs.Set(cinder.API, "get", fakes.stub_volume_get_notfound)
+        self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)

         req = webob.Request.blank('/v2/fake/os-volumes/456')
         resp = req.get_response(self.app)
@@ -212,7 +212,7 @@ class VolumeApiTest(test.TestCase):
         self.assertEqual(resp.status_int, 202)

     def test_volume_delete_no_volume(self):
-        self.stubs.Set(cinder.API, "get", fakes.stub_volume_get_notfound)
+        self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)

         req = webob.Request.blank('/v2/fake/os-volumes/456')
         req.method = 'DELETE'
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index c5d57ecbb..ef3474249 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -797,7 +797,7 @@ class ServerActionsControllerTest(test.TestCase):
         self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
         volume_api = self.controller.compute_api.volume_api
         volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
-        volume_api.create_snapshot_force(mox.IgnoreArg(), volume,
+        volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
                 mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
         self.mox.ReplayAll()
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 0b1ba5a6c..14e499a31 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -571,7 +571,7 @@ def stub_volume_get(self, context, volume_id):
     return stub_volume(volume_id)


-def stub_volume_get_notfound(self, context, volume_id):
+def stub_volume_notfound(self, context, volume_id):
     raise exc.VolumeNotFound(volume_id=volume_id)


@@ -601,13 +601,13 @@ def stub_snapshot(id, **kwargs):
     return snapshot


-def stub_snapshot_create(self, context, volume, name, description):
-    return stub_snapshot(100, volume_id=volume['id'], display_name=name,
+def stub_snapshot_create(self, context, volume_id, name, description):
+    return stub_snapshot(100, volume_id=volume_id, display_name=name,
                          display_description=description)


-def stub_snapshot_delete(self, context, snapshot):
-    if snapshot['id'] == '-1':
+def stub_snapshot_delete(self, context, snapshot_id):
+    if snapshot_id == '-1':
         raise exc.NotFound
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 4878397cc..edf4a5261 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -7044,11 +7044,6 @@ class ComputeAPITestCase(BaseTestCase):
         def fake_roll_detaching(*args, **kwargs):
             called['fake_roll_detaching'] = True

-        def fake_volume_get(self, context, volume_id):
-            called['fake_volume_get'] = True
-            return {'id': volume_id, 'attach_status': 'in-use'}
-
-        self.stubs.Set(cinder.API, 'get', fake_volume_get)
         self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
         self.stubs.Set(self.compute, "_get_instance_volume_bdm",
                        fake_get_instance_volume_bdm)
@@ -7060,7 +7055,6 @@ class ComputeAPITestCase(BaseTestCase):
         self.assertRaises(AttributeError, self.compute.detach_volume,
                           self.context, 1, instance)
         self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
-        self.assertTrue(called.get('fake_volume_get'))
         self.assertTrue(called.get('fake_roll_detaching'))

     def test_terminate_with_volumes(self):
@@ -7076,18 +7070,18 @@ class ComputeAPITestCase(BaseTestCase):
         }
         db.block_device_mapping_create(admin, values)

-        def fake_volume_get(self, context, volume):
+        def fake_volume_get(self, context, volume_id):
             return {'id': volume_id}
         self.stubs.Set(cinder.API, "get", fake_volume_get)

         # Stub out and record whether it gets detached
         result = {"detached": False}

-        def fake_detach(self, context, volume):
-            result["detached"] = volume["id"] == volume_id
+        def fake_detach(self, context, volume_id_param):
+            result["detached"] = volume_id_param == volume_id
         self.stubs.Set(cinder.API, "detach", fake_detach)

-        def fake_terminate_connection(self, context, volume, connector):
+        def fake_terminate_connection(self, context, volume_id, connector):
             return {}
         self.stubs.Set(cinder.API, "terminate_connection",
                        fake_terminate_connection)
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index 607f1444d..bec9bfb15 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -177,9 +177,10 @@ class API(object):
     def get_all(self, context):
         return self.volume_list

-    def delete(self, context, volume):
-        LOG.info('deleting volume %s', volume['id'])
-        self.volume_list = [v for v in self.volume_list if v != volume]
+    def delete(self, context, volume_id):
+        LOG.info('deleting volume %s', volume_id)
+        self.volume_list = [v for v in self.volume_list
+                            if v['id'] != volume_id]

     def check_attach(self, context, volume, instance=None):
         if volume['status'] != 'available':
@@ -199,9 +200,9 @@ class API(object):
             msg = _("already detached")
             raise exception.InvalidVolume(reason=msg)

-    def attach(self, context, volume, instance_uuid, mountpoint):
-        LOG.info('attaching volume %s', volume['id'])
-        volume = self.get(context, volume['id'])
+    def attach(self, context, volume_id, instance_uuid, mountpoint):
+        LOG.info('attaching volume %s', volume_id)
+        volume = self.get(context, volume_id)
         volume['status'] = 'in-use'
         volume['mountpoint'] = mountpoint
         volume['attach_status'] = 'attached'
@@ -215,9 +216,9 @@ class API(object):
         del self.volume_list[:]
         del self.snapshot_list[:]

-    def detach(self, context, volume):
-        LOG.info('detaching volume %s', volume['id'])
-        volume = self.get(context, volume['id'])
+    def detach(self, context, volume_id):
+        LOG.info('detaching volume %s', volume_id)
+        volume = self.get(context, volume_id)
         volume['status'] = 'available'
         volume['mountpoint'] = None
         volume['attach_status'] = 'detached'
@@ -237,7 +238,8 @@ class API(object):
     def get_all_snapshots(self, context):
         return self.snapshot_list

-    def create_snapshot(self, context, volume, name, description, id=None):
+    def create_snapshot(self, context, volume_id, name, description, id=None):
+        volume = self.get(context, volume_id)
         snapshot = fake_snapshot(volume['id'], volume['size'],
                                  name, description, id)
         self.snapshot_list.append(snapshot.snap)
@@ -255,32 +257,34 @@ class API(object):
         self.snapshot_list.append(snapshot.snap)
         return snapshot.snap

-    def create_snapshot_force(self, context, volume,
+    def create_snapshot_force(self, context, volume_id,
                               name, description, id=None):
+        volume = self.get(context, volume_id)
         snapshot = fake_snapshot(volume['id'], volume['size'],
                                  name, description, id)
         self.snapshot_list.append(snapshot.snap)
         return snapshot.snap

-    def delete_snapshot(self, context, snapshot):
-        self.snapshot_list = [s for s in self.snapshot_list if s != snapshot]
+    def delete_snapshot(self, context, snapshot_id):
+        self.snapshot_list = [s for s in self.snapshot_list
+                              if s['id'] != snapshot_id]

-    def reserve_volume(self, context, volume):
-        LOG.info('reserving volume %s', volume['id'])
-        volume = self.get(context, volume['id'])
+    def reserve_volume(self, context, volume_id):
+        LOG.info('reserving volume %s', volume_id)
+        volume = self.get(context, volume_id)
         volume['status'] = 'attaching'

-    def unreserve_volume(self, context, volume):
-        LOG.info('unreserving volume %s', volume['id'])
-        volume = self.get(context, volume['id'])
+    def unreserve_volume(self, context, volume_id):
+        LOG.info('unreserving volume %s', volume_id)
+        volume = self.get(context, volume_id)
         volume['status'] = 'available'

-    def begin_detaching(self, context, volume):
-        LOG.info('beging detaching volume %s', volume['id'])
-        volume = self.get(context, volume['id'])
+    def begin_detaching(self, context, volume_id):
+        LOG.info('beging detaching volume %s', volume_id)
+        volume = self.get(context, volume_id)
         volume['status'] = 'detaching'

-    def roll_detaching(self, context, volume):
-        LOG.info('roll detaching volume %s', volume['id'])
-        volume = self.get(context, volume['id'])
+    def roll_detaching(self, context, volume_id):
+        LOG.info('roll detaching volume %s', volume_id)
+        volume = self.get(context, volume_id)
         volume['status'] = 'in-use'
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 188f96055..d331b5aaf 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -3636,7 +3636,6 @@ class SnapshotsSampleJsonTests(ApiSampleTestBase):
     def _create_snapshot(self):
         self.stubs.Set(cinder.API, "create_snapshot",
                        fakes.stub_snapshot_create)
-        self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)

         response = self._do_post("os-snapshots",
                                  "snapshot-create-req",
diff --git a/nova/tests/volume/test_cinder.py b/nova/tests/volume/test_cinder.py
index 420fa2373..a235a526c 100644
--- a/nova/tests/volume/test_cinder.py
+++ b/nova/tests/volume/test_cinder.py
@@ -80,10 +80,10 @@ class CinderApiTestCase(test.TestCase):
         self.api.create(self.ctx, 1, '', '')

     def test_create_failed(self):
-        cinder.cinderclient(self.ctx).AndRaise(cinder_exception.NotFound(''))
+        cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest(''))
         self.mox.ReplayAll()

-        self.assertRaises(exception.VolumeNotFound,
+        self.assertRaises(exception.InvalidInput,
                           self.api.create, self.ctx, 1, '', '')

     def test_get_all(self):
@@ -142,7 +142,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.reserve('id1')
         self.mox.ReplayAll()

-        self.api.reserve_volume(self.ctx, {'id': 'id1'})
+        self.api.reserve_volume(self.ctx, 'id1')

     def test_unreserve_volume(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -151,7 +151,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.unreserve('id1')
         self.mox.ReplayAll()

-        self.api.unreserve_volume(self.ctx, {'id': 'id1'})
+        self.api.unreserve_volume(self.ctx, 'id1')

     def test_begin_detaching(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -160,7 +160,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.begin_detaching('id1')
         self.mox.ReplayAll()

-        self.api.begin_detaching(self.ctx, {'id': 'id1'})
+        self.api.begin_detaching(self.ctx, 'id1')

     def test_roll_detaching(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -169,7 +169,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.roll_detaching('id1')
         self.mox.ReplayAll()

-        self.api.roll_detaching(self.ctx, {'id': 'id1'})
+        self.api.roll_detaching(self.ctx, 'id1')

     def test_attach(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -178,7 +178,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.attach('id1', 'uuid', 'point')
         self.mox.ReplayAll()

-        self.api.attach(self.ctx, {'id': 'id1'}, 'uuid', 'point')
+        self.api.attach(self.ctx, 'id1', 'uuid', 'point')

     def test_detach(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -187,7 +187,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.detach('id1')
         self.mox.ReplayAll()

-        self.api.detach(self.ctx, {'id': 'id1'})
+        self.api.detach(self.ctx, 'id1')

     def test_initialize_connection(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -196,7 +196,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.initialize_connection('id1', 'connector')
         self.mox.ReplayAll()

-        self.api.initialize_connection(self.ctx, {'id': 'id1'}, 'connector')
+        self.api.initialize_connection(self.ctx, 'id1', 'connector')

     def test_terminate_connection(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -205,7 +205,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.terminate_connection('id1', 'connector')
         self.mox.ReplayAll()

-        self.api.terminate_connection(self.ctx, {'id': 'id1'}, 'connector')
+        self.api.terminate_connection(self.ctx, 'id1', 'connector')

     def test_delete(self):
         cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -214,7 +214,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volumes.delete('id1')
         self.mox.ReplayAll()

-        self.api.delete(self.ctx, {'id': 'id1'})
+        self.api.delete(self.ctx, 'id1')

     def test_update(self):
         self.assertRaises(NotImplementedError,
@@ -270,7 +270,7 @@ class CinderApiTestCase(test.TestCase):
         self.cinderclient.volume_snapshots.delete('id1')
         self.mox.ReplayAll()

-        self.api.delete_snapshot(self.ctx, {'id': 'id1'})
+        self.api.delete_snapshot(self.ctx, 'id1')

     def test_get_volume_metadata(self):
         self.assertRaises(NotImplementedError,
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 0d2666038..d08d4ef09 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -168,41 +168,46 @@ def _untranslate_snapshot_summary_view(context, snapshot):
     return d


+def translate_volume_exception(method):
+    """Transforms the exception for the volume but keeps its traceback intact.
+    """
+    def wrapper(self, ctx, volume_id, *args, **kwargs):
+        try:
+            res = method(self, ctx, volume_id, *args, **kwargs)
+        except cinder_exception.ClientException:
+            exc_type, exc_value, exc_trace = sys.exc_info()
+            if isinstance(exc_value, cinder_exception.NotFound):
+                exc_value = exception.VolumeNotFound(volume_id=volume_id)
+            elif isinstance(exc_value, cinder_exception.BadRequest):
+                exc_value = exception.InvalidInput(reason=exc_value.message)
+            raise exc_value, None, exc_trace
+        return res
+    return wrapper
+
+
+def translate_snapshot_exception(method):
+    """Transforms the exception for the snapshot but keeps its traceback
+       intact.
+    """
+    def wrapper(self, ctx, snapshot_id, *args, **kwargs):
+        try:
+            res = method(self, ctx, snapshot_id, *args, **kwargs)
+        except cinder_exception.ClientException:
+            exc_type, exc_value, exc_trace = sys.exc_info()
+            if isinstance(exc_value, cinder_exception.NotFound):
+                exc_value = exception.SnapshotNotFound(snapshot_id=snapshot_id)
+            raise exc_value, None, exc_trace
+        return res
+    return wrapper
+
+
 class API(base.Base):
     """API for interacting with the volume manager."""

-    def _reraise_translated_volume_exception(self, volume_id=None):
-        """Transform the exception for the volume but keep its traceback
-        intact."""
-        exc_type, exc_value, exc_trace = sys.exc_info()
-        new_exc = self._translate_volume_exception(volume_id, exc_value)
-        raise new_exc, None, exc_trace
-
-    def _translate_volume_exception(self, volume_id, exc_value):
-        if isinstance(exc_value, cinder_exception.NotFound):
-            return exception.VolumeNotFound(volume_id=volume_id)
-        elif isinstance(exc_value, cinder_exception.BadRequest):
-            return exception.InvalidInput(reason=exc_value.message)
-        return exc_value
-
-    def _reraise_translated_snapshot_exception(self, snapshot_id=None):
-        """Transform the exception for the snapshot but keep its traceback
-        intact."""
-        exc_type, exc_value, exc_trace = sys.exc_info()
-        new_exc = self._translate_snapshot_exception(snapshot_id, exc_value)
-        raise new_exc, None, exc_trace
-
-    def _translate_snapshot_exception(self, snapshot_id, exc_value):
-        if isinstance(exc_value, cinder_exception.NotFound):
-            return exception.SnapshotNotFound(snapshot_id=snapshot_id)
-        return exc_value
-
+    @translate_volume_exception
     def get(self, context, volume_id):
-        try:
-            item = cinderclient(context).volumes.get(volume_id)
-            return _untranslate_volume_summary_view(context, item)
-        except Exception:
-            self._reraise_translated_volume_exception(volume_id)
+        item = cinderclient(context).volumes.get(volume_id)
+        return _untranslate_volume_summary_view(context, item)

     def get_all(self, context, search_opts={}):
         items = cinderclient(context).volumes.list(detailed=True)
@@ -232,33 +237,40 @@ class API(base.Base):
             msg = _("already detached")
             raise exception.InvalidVolume(reason=msg)

-    def reserve_volume(self, context, volume):
-        cinderclient(context).volumes.reserve(volume['id'])
+    @translate_volume_exception
+    def reserve_volume(self, context, volume_id):
+        cinderclient(context).volumes.reserve(volume_id)

-    def unreserve_volume(self, context, volume):
-        cinderclient(context).volumes.unreserve(volume['id'])
+    @translate_volume_exception
+    def unreserve_volume(self, context, volume_id):
+        cinderclient(context).volumes.unreserve(volume_id)

-    def begin_detaching(self, context, volume):
-        cinderclient(context).volumes.begin_detaching(volume['id'])
+    @translate_volume_exception
+    def begin_detaching(self, context, volume_id):
+        cinderclient(context).volumes.begin_detaching(volume_id)

-    def roll_detaching(self, context, volume):
-        cinderclient(context).volumes.roll_detaching(volume['id'])
+    @translate_volume_exception
+    def roll_detaching(self, context, volume_id):
+        cinderclient(context).volumes.roll_detaching(volume_id)

-    def attach(self, context, volume, instance_uuid, mountpoint):
-        cinderclient(context).volumes.attach(volume['id'],
-                                             instance_uuid,
+    @translate_volume_exception
+    def attach(self, context, volume_id, instance_uuid, mountpoint):
+        cinderclient(context).volumes.attach(volume_id, instance_uuid,
                                              mountpoint)

-    def detach(self, context, volume):
-        cinderclient(context).volumes.detach(volume['id'])
+    @translate_volume_exception
+    def detach(self, context, volume_id):
+        cinderclient(context).volumes.detach(volume_id)

-    def initialize_connection(self, context, volume, connector):
-        return cinderclient(context).\
-            volumes.initialize_connection(volume['id'], connector)
+    @translate_volume_exception
+    def initialize_connection(self, context, volume_id, connector):
+        return cinderclient(context).volumes.initialize_connection(volume_id,
+                                                                   connector)

-    def terminate_connection(self, context, volume, connector):
-        return cinderclient(context).\
-            volumes.terminate_connection(volume['id'], connector)
+    @translate_volume_exception
+    def terminate_connection(self, context, volume_id, connector):
+        return cinderclient(context).volumes.terminate_connection(volume_id,
+                                                                  connector)

     def create(self, context, size, name, description, snapshot=None,
                image_id=None, volume_type=None, metadata=None,
@@ -282,20 +294,20 @@ class API(base.Base):
         try:
             item = cinderclient(context).volumes.create(size, **kwargs)
             return _untranslate_volume_summary_view(context, item)
-        except Exception:
-            self._reraise_translated_volume_exception()
+        except cinder_exception.BadRequest as e:
+            raise exception.InvalidInput(reason=e.message)

-    def delete(self, context, volume):
-        cinderclient(context).volumes.delete(volume['id'])
+    @translate_volume_exception
+    def delete(self, context, volume_id):
+        cinderclient(context).volumes.delete(volume_id)

-    def update(self, context, volume, fields):
+    @translate_volume_exception
+    def update(self, context, volume_id, fields):
         raise NotImplementedError()

+    @translate_snapshot_exception
     def get_snapshot(self, context, snapshot_id):
-        try:
-            item = cinderclient(context).volume_snapshots.get(snapshot_id)
-        except Exception:
-            self._reraise_translated_snapshot_exception(snapshot_id)
+        item = cinderclient(context).volume_snapshots.get(snapshot_id)

         return _untranslate_snapshot_summary_view(context, item)

     def get_all_snapshots(self, context):
@@ -307,32 +319,40 @@ class API(base.Base):

         return rvals

-    def create_snapshot(self, context, volume, name, description):
-        item = cinderclient(context).volume_snapshots.create(volume['id'],
+    @translate_volume_exception
+    def create_snapshot(self, context, volume_id, name, description):
+        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                              False,
                                                              name,
                                                              description)
         return _untranslate_snapshot_summary_view(context, item)

-    def create_snapshot_force(self, context, volume, name, description):
-        item = cinderclient(context).volume_snapshots.create(volume['id'],
+    @translate_volume_exception
+    def create_snapshot_force(self, context, volume_id, name, description):
+        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                              True,
                                                              name,
                                                              description)
         return _untranslate_snapshot_summary_view(context, item)

-    def delete_snapshot(self, context, snapshot):
-        cinderclient(context).volume_snapshots.delete(snapshot['id'])
+    @translate_snapshot_exception
+    def delete_snapshot(self, context, snapshot_id):
+        cinderclient(context).volume_snapshots.delete(snapshot_id)

-    def get_volume_metadata(self, context, volume):
+    @translate_volume_exception
+    def get_volume_metadata(self, context, volume_id):
         raise NotImplementedError()

-    def delete_volume_metadata(self, context, volume, key):
+    @translate_volume_exception
+    def delete_volume_metadata(self, context, volume_id, key):
         raise NotImplementedError()

-    def update_volume_metadata(self, context, volume, metadata, delete=False):
+    @translate_volume_exception
+    def update_volume_metadata(self, context, volume_id,
+                               metadata, delete=False):
         raise NotImplementedError()

-    def get_volume_metadata_value(self, volume, key):
+    @translate_volume_exception
+    def get_volume_metadata_value(self, volume_id, key):
         raise NotImplementedError()
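
The new translate_volume_exception decorator in nova/volume/cinder.py
centralizes the per-method try/except translation that the removed
_reraise_translated_* helpers performed. Below is a minimal, self-contained
rendering of the same pattern, not the patch's code: it uses Python 3's
with_traceback() in place of the Python 2 three-argument raise shown in the
diff, and the stub exception classes are placeholders for the real ones in
cinderclient.exceptions and nova.exception.

    import sys


    # Stub stand-ins for the cinderclient and Nova exception classes
    # (assumptions for this sketch only).
    class ClientException(Exception):
        pass


    class NotFound(ClientException):
        pass


    class VolumeNotFound(Exception):
        def __init__(self, volume_id):
            super(VolumeNotFound, self).__init__(
                "Volume %s could not be found." % volume_id)


    def translate_volume_exception(method):
        """Translate client errors to Nova errors, keeping the traceback."""
        def wrapper(self, ctx, volume_id, *args, **kwargs):
            try:
                return method(self, ctx, volume_id, *args, **kwargs)
            except ClientException as e:
                exc_trace = sys.exc_info()[2]
                if isinstance(e, NotFound):
                    # Python 3 spelling of the patch's Python 2
                    # `raise exc_value, None, exc_trace`.
                    raise VolumeNotFound(volume_id).with_traceback(exc_trace)
                raise
        return wrapper


    class API(object):
        @translate_volume_exception
        def get(self, ctx, volume_id):
            # Simulate cinderclient failing on a missing volume.
            raise NotFound('no such volume')


    try:
        API().get(None, 'vol-1')
    except VolumeNotFound as e:
        print(e)  # -> Volume vol-1 could not be found.

The decorator also fixes the convention that volume_id (or snapshot_id) is
the first argument after the context, which is what lets every method in the
refactored cinder.API share a single wrapper instead of repeating the
try/except block in each method body.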