From 1bf89924ba6eafc394612f9d62c1b52251825b2b Mon Sep 17 00:00:00 2001
From: Dan Smith
Date: Wed, 2 Jan 2013 10:32:38 -0800
Subject: Move block_device_mapping get operations to conductor

This patch moves the block_device_mapping_get_all_by_instance() uses
from compute/manager to the conductor. This also includes some nasty
refactoring in some of compute's helper methods to pass around
instances instead of just uuids so that we can properly pass the
former to the conductor.

Related to blueprint no-db-compute-manager

Change-Id: Ib5223274d21096a9e47b3420c3de801b978b253d
---
 nova/compute/manager.py                | 76 +++++++++++++++++-----------
 nova/conductor/api.py                  |  8 ++++
 nova/conductor/manager.py              |  7 +++-
 nova/conductor/rpcapi.py               |  7 ++++
 nova/tests/conductor/test_conductor.py | 11 +++++
 5 files changed, 69 insertions(+), 40 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 2f13b6a05..4eb829fda 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -602,8 +602,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                 context, instance, "create.start",
                 extra_usage_info=extra_usage_info)
         network_info = None
-        bdms = self.db.block_device_mapping_get_all_by_instance(
-            context, instance['uuid'])
+        bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
+            context, instance)
         rt = self._get_resource_tracker(node)
         try:
             limits = filter_properties.get('limits', {})
@@ -909,13 +909,13 @@ class ComputeManager(manager.SchedulerDependentManager):
         return [bdm for bdm in bdms if bdm['volume_id']]

     # NOTE(danms): Legacy interface for digging up volumes in the database
-    def _get_instance_volume_bdms(self, context, instance_uuid):
+    def _get_instance_volume_bdms(self, context, instance):
         return self._get_volume_bdms(
-            self.db.block_device_mapping_get_all_by_instance(context,
-                                                             instance_uuid))
+            self.conductor_api.block_device_mapping_get_all_by_instance(
+                context, instance))

-    def _get_instance_volume_bdm(self, context, instance_uuid, volume_id):
-        bdms = self._get_instance_volume_bdms(context, instance_uuid)
+    def _get_instance_volume_bdm(self, context, instance, volume_id):
+        bdms = self._get_instance_volume_bdms(context, instance)
         for bdm in bdms:
             # NOTE(vish): Comparing as strings because the os_api doesn't
             #             convert to integer and we may wish to support uuids
@@ -925,10 +925,10 @@ class ComputeManager(manager.SchedulerDependentManager):

     # NOTE(danms): This is a transitional interface until all the callers
     #              can provide their own bdms
-    def _get_instance_volume_block_device_info(self, context, instance_uuid,
+    def _get_instance_volume_block_device_info(self, context, instance,
                                                bdms=None):
         if bdms is None:
-            bdms = self._get_instance_volume_bdms(context, instance_uuid)
+            bdms = self._get_instance_volume_bdms(context, instance)
         return self._get_volume_block_device_info(bdms)

     def _get_volume_block_device_info(self, bdms):
@@ -990,7 +990,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # NOTE(vish) get bdms before destroying the instance
         vol_bdms = self._get_volume_bdms(bdms)
         block_device_info = self._get_instance_volume_block_device_info(
-            context, instance['uuid'], bdms=bdms)
+            context, instance, bdms=bdms)
         self.driver.destroy(instance, self._legacy_nw_info(network_info),
                             block_device_info)
         for bdm in vol_bdms:
@@ -1072,7 +1072,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         elevated = context.elevated()
         # NOTE(danms): remove this compatibility in the future
         if not bdms:
-            bdms = self._get_instance_volume_bdms(context, instance["uuid"])
instance["uuid"]) + bdms = self._get_instance_volume_bdms(context, instance) @lockutils.synchronized(instance['uuid'], 'nova-') def do_terminate_instance(instance, bdms): @@ -1255,8 +1255,9 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self.network_api.get_instance_nw_info(context, instance) if bdms is None: - bdms = self.db.block_device_mapping_get_all_by_instance( - context, instance['uuid']) + capi = self.conductor_api + bdms = capi.block_device_mapping_get_all_by_instance( + context, instance) device_info = self._setup_block_device_mapping(context, instance, bdms) @@ -1305,7 +1306,7 @@ class ComputeManager(manager.SchedulerDependentManager): # is no longer needed if block_device_info is None: block_device_info = self._get_instance_volume_block_device_info( - context, instance['uuid']) + context, instance) # NOTE(danms): remove this when RPC API < 2.5 compatibility # is no longer needed if network_info is None: @@ -1684,7 +1685,7 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self._get_instance_nw_info(context, instance) block_device_info = self._get_instance_volume_block_device_info( - context, instance['uuid']) + context, instance) self.driver.destroy(instance, self._legacy_nw_info(network_info), block_device_info) @@ -1734,9 +1735,9 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_api.setup_networks_on_host(context, instance, migration['source_compute']) - bdms = self._get_instance_volume_bdms(context, instance['uuid']) + bdms = self._get_instance_volume_bdms(context, instance) block_device_info = self._get_instance_volume_block_device_info( - context, instance['uuid']) + context, instance) if bdms: connector = self.driver.get_volume_connector(instance) for bdm in bdms: @@ -1911,7 +1912,7 @@ class ComputeManager(manager.SchedulerDependentManager): context, instance, "resize.start", network_info=network_info) block_device_info = self._get_instance_volume_block_device_info( - context, instance['uuid']) + context, instance) disk_info = self.driver.migrate_disk_and_power_off( context, instance, migration['dest_host'], @@ -1941,7 +1942,7 @@ class ComputeManager(manager.SchedulerDependentManager): network_info=network_info) def _terminate_volume_connections(self, context, instance): - bdms = self._get_instance_volume_bdms(context, instance['uuid']) + bdms = self._get_instance_volume_bdms(context, instance) if bdms: connector = self.driver.get_volume_connector(instance) for bdm in bdms: @@ -1984,9 +1985,9 @@ class ComputeManager(manager.SchedulerDependentManager): context, instance, "finish_resize.start", network_info=network_info) - bdms = self._get_instance_volume_bdms(context, instance['uuid']) + bdms = self._get_instance_volume_bdms(context, instance) block_device_info = self._get_instance_volume_block_device_info( - context, instance['uuid'], bdms=bdms) + context, instance, bdms=bdms) if bdms: connector = self.driver.get_volume_connector(instance) @@ -2183,7 +2184,7 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self._get_instance_nw_info(context, instance) block_device_info = self._get_instance_volume_block_device_info( - context, instance['uuid']) + context, instance) self.driver.resume(instance, self._legacy_nw_info(network_info), block_device_info) @@ -2405,8 +2406,7 @@ class ComputeManager(manager.SchedulerDependentManager): @wrap_instance_fault def detach_volume(self, context, volume_id, instance): """Detach a volume from an instance.""" - bdm = 
-                                            volume_id)
+        bdm = self._get_instance_volume_bdm(context, instance, volume_id)
         if CONF.volume_usage_poll_interval > 0:
             vol_stats = []
             mp = bdm['device_name']
@@ -2440,9 +2440,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         #                  detached, or delete the bdm, just remove the
         #                  connection from this host.
         try:
-            bdm = self._get_instance_volume_bdm(context,
-                                                instance['uuid'],
-                                                volume_id)
+            bdm = self._get_instance_volume_bdm(context, instance, volume_id)
             self._detach_volume(context, instance, bdm)
             volume = self.volume_api.get(context, volume_id)
             connector = self.driver.get_volume_connector(instance)
@@ -2527,14 +2525,14 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         # If any volume is mounted, prepare here.
         block_device_info = self._get_instance_volume_block_device_info(
-            context, instance['uuid'])
+            context, instance)
         if not block_device_info['block_device_mapping']:
             LOG.info(_('Instance has no volume.'), instance=instance)

         # assign the volume to host system
         # needed by the lefthand volume driver and maybe others
         connector = self.driver.get_volume_connector(instance)
-        for bdm in self._get_instance_volume_bdms(context, instance['uuid']):
+        for bdm in self._get_instance_volume_bdms(context, instance):
             volume = self.volume_api.get(context, bdm['volume_id'])
             self.volume_api.initialize_connection(context, volume, connector)
@@ -2623,7 +2621,7 @@ class ComputeManager(manager.SchedulerDependentManager):

         # Detaching volumes.
         connector = self.driver.get_volume_connector(instance_ref)
-        for bdm in self._get_instance_volume_bdms(ctxt, instance_ref['uuid']):
+        for bdm in self._get_instance_volume_bdms(ctxt, instance_ref):
             # NOTE(vish): We don't want to actually mark the volume
             #             detached, or delete the bdm, just remove the
             #             connection from this host.
@@ -2743,8 +2741,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.network_api.setup_networks_on_host(context, instance_ref,
                                                 self.host)

-        for bdm in self._get_instance_volume_bdms(context,
-                                                  instance_ref['uuid']):
+        for bdm in self._get_instance_volume_bdms(context, instance_ref):
             volume_id = bdm['volume_id']
             volume = self.volume_api.get(context, volume_id)
             self.compute_rpcapi.remove_volume_connection(context, instance_ref,
@@ -2780,7 +2777,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # NOTE(vish): The mapping is passed in so the driver can disconnect
         #             from remote volumes if necessary
         block_device_info = self._get_instance_volume_block_device_info(
-            context, instance['uuid'])
+            context, instance)
         self.driver.destroy(instance, self._legacy_nw_info(network_info),
                             block_device_info)

@@ -3039,8 +3036,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         compute_host_bdms = []
         instances = self.db.instance_get_all_by_host(context, self.host)
         for instance in instances:
-            instance_bdms = self._get_instance_volume_bdms(context,
-                                                           instance['uuid'])
+            instance_bdms = self._get_instance_volume_bdms(context, instance)
             compute_host_bdms.append(dict(instance=instance,
                                           instance_bdms=instance_bdms))

@@ -3265,8 +3261,9 @@ class ComputeManager(manager.SchedulerDependentManager):
             soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED

             if soft_deleted and old_enough:
-                bdms = self.db.block_device_mapping_get_all_by_instance(
-                    context, instance['uuid'])
+                capi = self.conductor_api
+                bdms = capi.block_device_mapping_get_all_by_instance(
+                    context, instance)
                 LOG.info(_('Reclaiming deleted instance'), instance=instance)
                 self._delete_instance(context, instance, bdms)
@@ -3317,8 +3314,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         # NOTE(sirp): admin contexts don't ordinarily return deleted records
         with utils.temporary_mutation(context, read_deleted="yes"):
             for instance in self._running_deleted_instances(context):
-                bdms = self.db.block_device_mapping_get_all_by_instance(
-                    context, instance['uuid'])
+                capi = self.conductor_api
+                bdms = capi.block_device_mapping_get_all_by_instance(
+                    context, instance)

                 if action == "log":
                     name = instance['name']
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 650849781..1fe78d4ab 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -150,6 +150,10 @@ class LocalAPI(object):
         return self._manager.block_device_mapping_update_or_create(context,
                                                                     values)

+    def block_device_mapping_get_all_by_instance(self, context, instance):
+        return self._manager.block_device_mapping_get_all_by_instance(
+            context, instance)
+

 class API(object):
     """Conductor API that does updates via RPC to the ConductorManager"""
@@ -249,3 +253,7 @@ class API(object):
     def block_device_mapping_update_or_create(self, context, values):
         return self.conductor_rpcapi.block_device_mapping_update_or_create(
             context, values)
+
+    def block_device_mapping_get_all_by_instance(self, context, instance):
+        return self.conductor_rpcapi.block_device_mapping_get_all_by_instance(
+            context, instance)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 6d346b0a4..bac8207d4 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
 class ConductorManager(manager.SchedulerDependentManager):
     """Mission: TBD"""

-    RPC_API_VERSION = '1.12'
+    RPC_API_VERSION = '1.13'

     def __init__(self, *args, **kwargs):
         super(ConductorManager, self).__init__(service_name='conductor',
@@ -164,3 +164,8 @@ class ConductorManager(manager.SchedulerDependentManager):
             self.db.block_device_mapping_create(context, values)
         else:
             self.db.block_device_mapping_update(context, values['id'], values)
+
+    def block_device_mapping_get_all_by_instance(self, context, instance):
+        bdms = self.db.block_device_mapping_get_all_by_instance(
+            context, instance['uuid'])
+        return jsonutils.to_primitive(bdms)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 3e9aa44a2..fea461d26 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -41,6 +41,7 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
     1.10 - Added agent_build_get_by_triple
     1.11 - Added aggregate_get
     1.12 - Added block_device_mapping_update_or_create
+    1.13 - Added block_device_mapping_get_all_by_instance
     """

     BASE_RPC_API_VERSION = '1.0'
@@ -152,3 +153,9 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
         msg = self.make_msg('block_device_mapping_update_or_create',
                             values=values, create=create)
         return self.call(context, msg, version='1.12')
+
+    def block_device_mapping_get_all_by_instance(self, context, instance):
+        instance_p = jsonutils.to_primitive(instance)
+        msg = self.make_msg('block_device_mapping_get_all_by_instance',
+                            instance=instance_p)
+        return self.call(context, msg, version='1.13')
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 0d1ff1d60..4aa552cc7 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -268,6 +268,17 @@ class _BaseTestCase(object):
                                                       'fake-arch')
         self.assertEqual(result, 'it worked')

+    def test_block_device_mapping_get_all_by_instance(self):
+        fake_inst = {'uuid': 'fake-uuid'}
+        self.mox.StubOutWithMock(db,
+                                 'block_device_mapping_get_all_by_instance')
+        db.block_device_mapping_get_all_by_instance(
+            self.context, fake_inst['uuid']).AndReturn('fake-result')
+        self.mox.ReplayAll()
+        result = self.conductor.block_device_mapping_get_all_by_instance(
+            self.context, fake_inst)
+        self.assertEqual(result, 'fake-result')
+

 class ConductorTestCase(_BaseTestCase, test.TestCase):
     """Conductor Manager Tests"""
--
cgit
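
The pattern the diff repeats is easiest to see outside the patch: compute hands
the conductor the whole instance dict, the conductor digs the uuid out for the
actual database call, and the result is flattened to JSON primitives so it can
cross the RPC boundary. Below is a minimal, self-contained sketch of that round
trip; FakeDB, ConductorSketch, ComputeSketch, and to_primitive() are
illustrative stand-ins for the Nova classes touched above, not real Nova code.

import json


def to_primitive(value):
    # Stand-in for nova's jsonutils.to_primitive(): reduce DB results
    # to JSON-safe types so they survive RPC serialization.
    return json.loads(json.dumps(value))


class FakeDB(object):
    # Illustrative stub; the real call hits the Nova database layer.
    def block_device_mapping_get_all_by_instance(self, context, instance_uuid):
        return [{'instance_uuid': instance_uuid,
                 'volume_id': 'vol-1',
                 'device_name': '/dev/vdb'}]


class ConductorSketch(object):
    """Conductor side: the only layer that touches the database."""

    def __init__(self, db):
        self.db = db

    def block_device_mapping_get_all_by_instance(self, context, instance):
        # The caller passes the whole instance; only here is the uuid
        # extracted, mirroring the new conductor/manager.py method.
        bdms = self.db.block_device_mapping_get_all_by_instance(
            context, instance['uuid'])
        return to_primitive(bdms)


class ComputeSketch(object):
    """Compute side: database-free, goes through the conductor."""

    def __init__(self, conductor_api):
        self.conductor_api = conductor_api

    def _get_instance_volume_bdms(self, context, instance):
        # The helper now takes the instance rather than instance_uuid --
        # the signature change this patch threads through compute/manager.
        bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
            context, instance)
        return [bdm for bdm in bdms if bdm['volume_id']]


compute = ComputeSketch(ConductorSketch(FakeDB()))
print(compute._get_instance_volume_bdms(None, {'uuid': 'fake-uuid'}))

Returning to_primitive(bdms) from the conductor is what lets LocalAPI and the
RPC-backed API stay interchangeable: SQLAlchemy rows would not serialize over
RPC, so both paths hand compute plain dicts.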