| field | value | date |
|---|---|---|
| author | Jenkins <jenkins@review.openstack.org> | 2013-05-15 19:50:20 +0000 |
| committer | Gerrit Code Review <review@openstack.org> | 2013-05-15 19:50:20 +0000 |
| commit | ae624fe2e75cfc61826b160e68823bc41d062518 | |
| tree | 734abc38f0a022160fe3101a094504c8093d2f1e /nova/compute | |
| parent | 1a71dfcd274fb623694203bfe6a2db8b7355bb74 | |
| parent | 586e752e69ca891714f390bf59ad30d5081d4498 | |
Merge "Refactor nova.volume.cinder.API to reduce roundtrips with Cinder"
Diffstat (limited to 'nova/compute')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | nova/compute/api.py | 13 |
| -rwxr-xr-x | nova/compute/manager.py | 51 |
2 files changed, 27 insertions, 37 deletions
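
The hunks below all follow one pattern: callers stop fetching a full volume dict via `self.volume_api.get()` just to hand it to another `volume_api` method, and pass the bare volume ID instead, dropping one Cinder HTTP round-trip per operation. A minimal standalone sketch of the before/after calling convention (the class and method names here are illustrative stand-ins, not Nova's real implementation):

```python
# Illustrative stand-ins only -- not Nova's actual classes. The point is
# the round-trip arithmetic: the old convention needed a GET per
# operation just to build the volume dict the API method expected.

class OldVolumeAPI(object):
    """Pre-merge convention: methods take a volume dict."""

    def get(self, context, volume_id):
        print('GET /volumes/%s' % volume_id)           # HTTP call to Cinder
        return {'id': volume_id}

    def detach(self, context, volume):
        print('detach action for %s' % volume['id'])   # second HTTP call


class NewVolumeAPI(object):
    """Post-merge convention: methods take the volume ID directly."""

    def detach(self, context, volume_id):
        print('detach action for %s' % volume_id)      # the only HTTP call


old_api, new_api = OldVolumeAPI(), NewVolumeAPI()
old_api.detach(None, old_api.get(None, 'vol-1'))       # two round-trips
new_api.detach(None, 'vol-1')                          # one round-trip
```

Calls that genuinely inspect volume fields, such as `check_attach()` and `check_detach()`, keep their explicit `get()` in the hunks below.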
```diff
diff --git a/nova/compute/api.py b/nova/compute/api.py
index aa0ae7d96..f47121c36 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1202,18 +1202,17 @@ class API(base.Base):
         # cleanup volumes
         for bdm in bdms:
             if bdm['volume_id']:
-                volume = self.volume_api.get(context, bdm['volume_id'])
                 # NOTE(vish): We don't have access to correct volume
                 #             connector info, so just pass a fake
                 #             connector. This can be improved when we
                 #             expose get_volume_connector to rpc.
                 connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
                 self.volume_api.terminate_connection(context,
-                                                     volume,
+                                                     bdm['volume_id'],
                                                      connector)
-                self.volume_api.detach(elevated, volume)
+                self.volume_api.detach(elevated, bdm['volume_id'])
                 if bdm['delete_on_termination']:
-                    self.volume_api.delete(context, volume)
+                    self.volume_api.delete(context, bdm['volume_id'])
             self.db.block_device_mapping_destroy(context, bdm['id'])
         instance = self._instance_update(context,
                                          instance_uuid,
@@ -1633,7 +1632,7 @@ class API(base.Base):
             # short time, it doesn't matter for now.
             name = _('snapshot for %s') % image_meta['name']
             snapshot = self.volume_api.create_snapshot_force(
-                context, volume, name, volume['display_description'])
+                context, volume['id'], name, volume['display_description'])
             bdm['snapshot_id'] = snapshot['id']
             bdm['volume_id'] = None
@@ -2334,7 +2333,7 @@ class API(base.Base):
         try:
             volume = self.volume_api.get(context, volume_id)
             self.volume_api.check_attach(context, volume, instance=instance)
-            self.volume_api.reserve_volume(context, volume)
+            self.volume_api.reserve_volume(context, volume_id)
             self.compute_rpcapi.attach_volume(context, instance=instance,
                     volume_id=volume_id, mountpoint=device)
         except Exception:
@@ -2349,7 +2348,7 @@ class API(base.Base):
         it easier for cells version to override.
         """
         self.volume_api.check_detach(context, volume)
-        self.volume_api.begin_detaching(context, volume)
+        self.volume_api.begin_detaching(context, volume['id'])
         self.compute_rpcapi.detach_volume(context, instance=instance,
                 volume_id=volume['id'])
```

```diff
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7bc8281d4..02527913a 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -793,7 +793,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             if bdm['volume_id'] is not None:
                 volume = self.volume_api.get(context, bdm['volume_id'])
                 self.volume_api.check_attach(context, volume,
-                                                instance=instance)
+                                             instance=instance)
                 cinfo = self._attach_volume_boot(context,
                                                  instance,
                                                  volume,
@@ -1352,12 +1352,11 @@ class ComputeManager(manager.SchedulerDependentManager):
             try:
                 # NOTE(vish): actual driver detach done in driver.destroy, so
                 #             just tell nova-volume that we are done with it.
-                volume = self.volume_api.get(context, bdm['volume_id'])
                 connector = self.driver.get_volume_connector(instance)
                 self.volume_api.terminate_connection(context,
-                                                     volume,
+                                                     bdm['volume_id'],
                                                      connector)
-                self.volume_api.detach(context, volume)
+                self.volume_api.detach(context, bdm['volume_id'])
             except exception.DiskNotFound as exc:
                 LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
                          instance=instance)
@@ -1372,8 +1371,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             LOG.debug(_("terminating bdm %s") % bdm,
                       instance_uuid=instance_uuid)
             if bdm['volume_id'] and bdm['delete_on_termination']:
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.delete(context, volume)
+                self.volume_api.delete(context, bdm['volume_id'])
             # NOTE(vish): bdms will be deleted on instance destroy

     @hooks.add_hook("delete_instance")
@@ -1688,8 +1686,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             # NOTE(sirp): this detach is necessary b/c we will reattach the
             # volumes in _prep_block_devices below.
             for bdm in self._get_volume_bdms(bdms):
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.detach(context, volume)
+                self.volume_api.detach(context, bdm['volume_id'])

             if not recreate:
                 block_device_info = self._get_volume_block_device_info(
@@ -1762,7 +1759,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.info(_("Detaching from volume api: %s") % volume_id)
         volume = self.volume_api.get(context, volume_id)
         self.volume_api.check_detach(context, volume)
-        self.volume_api.begin_detaching(context, volume)
+        self.volume_api.begin_detaching(context, volume_id)

         # Manager-detach
         self.detach_volume(context, volume_id, instance)
@@ -2245,9 +2242,8 @@ class ComputeManager(manager.SchedulerDependentManager):

         connector = self.driver.get_volume_connector(instance)
         for bdm in bdms:
-            volume = self.volume_api.get(context, bdm['volume_id'])
             cinfo = self.volume_api.initialize_connection(
-                    context, volume, connector)
+                    context, bdm['volume_id'], connector)
             self.conductor_api.block_device_mapping_update(
                 context, bdm['id'],
@@ -2509,9 +2505,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         if bdms:
             connector = self.driver.get_volume_connector(instance)
             for bdm in bdms:
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.terminate_connection(context, volume,
-                                                     connector)
+                self.volume_api.terminate_connection(context, bdm['volume_id'],
+                                                     connector)

     def _finish_resize(self, context, instance, migration, disk_info,
                        image):
@@ -2916,9 +2911,9 @@ class ComputeManager(manager.SchedulerDependentManager):
                 locals(), context=context, instance=instance)
         connector = self.driver.get_volume_connector(instance)
         connection_info = self.volume_api.initialize_connection(context,
-                                                                volume,
+                                                                volume_id,
                                                                 connector)
-        self.volume_api.attach(context, volume, instance_uuid, mountpoint)
+        self.volume_api.attach(context, volume_id, instance_uuid, mountpoint)
         return connection_info

     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -2961,14 +2956,13 @@ class ComputeManager(manager.SchedulerDependentManager):
                 context, instance, mountpoint)

     def _attach_volume(self, context, volume_id, mountpoint, instance):
-        volume = self.volume_api.get(context, volume_id)
         context = context.elevated()
         LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
                   locals(), context=context, instance=instance)
         try:
             connector = self.driver.get_volume_connector(instance)
             connection_info = self.volume_api.initialize_connection(context,
-                                                                    volume,
+                                                                    volume_id,
                                                                     connector)
         except Exception:  # pylint: disable=W0702
             with excutils.save_and_reraise_exception():
                 msg = _("Failed to connect to volume %(volume_id)s "
                         "while attaching at %(mountpoint)s")
                 LOG.exception(msg % locals(), context=context,
                               instance=instance)
-                self.volume_api.unreserve_volume(context, volume)
+                self.volume_api.unreserve_volume(context, volume_id)

         if 'serial' not in connection_info:
             connection_info['serial'] = volume_id
@@ -2992,11 +2986,11 @@ class ComputeManager(manager.SchedulerDependentManager):
                 LOG.exception(msg % locals(), context=context,
                               instance=instance)
                 self.volume_api.terminate_connection(context,
-                                                     volume,
+                                                     volume_id,
                                                      connector)

         self.volume_api.attach(context,
-                               volume,
+                               volume_id,
                                instance['uuid'],
                                mountpoint)
         values = {
@@ -3037,8 +3031,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             msg = _("Failed to detach volume %(volume_id)s from %(mp)s")
             LOG.exception(msg % locals(), context=context,
                           instance=instance)
-            volume = self.volume_api.get(context, volume_id)
-            self.volume_api.roll_detaching(context, volume)
+            self.volume_api.roll_detaching(context, volume_id)

     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
@@ -3067,10 +3060,9 @@ class ComputeManager(manager.SchedulerDependentManager):
                                     update_totals=True)

         self._detach_volume(context, instance, bdm)
-        volume = self.volume_api.get(context, volume_id)
         connector = self.driver.get_volume_connector(instance)
-        self.volume_api.terminate_connection(context, volume, connector)
-        self.volume_api.detach(context.elevated(), volume)
+        self.volume_api.terminate_connection(context, volume_id, connector)
+        self.volume_api.detach(context.elevated(), volume_id)
         self.conductor_api.block_device_mapping_destroy_by_instance_and_volume(
             context, instance, volume_id)
@@ -3083,9 +3075,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         try:
             bdm = self._get_instance_volume_bdm(context, instance, volume_id)
             self._detach_volume(context, instance, bdm)
-            volume = self.volume_api.get(context, volume_id)
             connector = self.driver.get_volume_connector(instance)
-            self.volume_api.terminate_connection(context, volume, connector)
+            self.volume_api.terminate_connection(context, volume_id, connector)
         except exception.NotFound:
             pass
@@ -3314,8 +3305,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         # remove the volume connection without detaching from hypervisor
         # because the instance is not running anymore on the current host
-        volume = self.volume_api.get(ctxt, bdm['volume_id'])
-        self.volume_api.terminate_connection(ctxt, volume, connector)
+        self.volume_api.terminate_connection(ctxt, bdm['volume_id'],
+                                             connector)

         # Releasing vlan.
         # (not necessary in current implementation?)
```
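
A side note on the `_attach_volume` hunk above: the rollback on the error path runs inside `excutils.save_and_reraise_exception()`, so `unreserve_volume()` (now taking the bare ID) executes and the original failure still propagates. A self-contained Python 3 sketch of that pattern; the context manager below is a minimal stand-in for nova's excutils helper, not its actual implementation:

```python
import sys


class save_and_reraise_exception(object):
    """Minimal stand-in for nova's excutils helper (illustrative only):
    capture the in-flight exception on entry, let rollback code run,
    then re-raise the original error."""

    def __enter__(self):
        # Expected to be entered from inside an ``except`` block, so
        # sys.exc_info() holds the exception currently being handled.
        self.exc_value, self.exc_tb = sys.exc_info()[1:]
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            return False  # the rollback itself failed; let that propagate
        if self.exc_value is not None:
            # Rollback succeeded; re-raise the original exception.
            raise self.exc_value.with_traceback(self.exc_tb)
        return False


def attach_with_rollback(volume_api, context, volume_id, connector):
    # Mirrors the _attach_volume hunk: if initialize_connection fails,
    # undo the earlier reservation so the volume does not stay stuck in
    # 'attaching', then re-raise the original failure. Post-merge, both
    # calls take the bare volume ID rather than a fetched volume dict.
    try:
        return volume_api.initialize_connection(context, volume_id,
                                                connector)
    except Exception:
        with save_and_reraise_exception():
            volume_api.unreserve_volume(context, volume_id)
```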
