From 7b39ef953a7157ab657329fc02075056e2d8a93a Mon Sep 17 00:00:00 2001
From: Brian Elliott
Date: Tue, 20 Nov 2012 20:34:11 +0000
Subject: Cleanup compute multi-node assignment of node

Move assignment of the node field on the instance to the compute host
to be more consistent with how the host field is assigned and handled
by the resource tracker.

Change-Id: Id3086585a99350abbab387932e689825b33ab6b5
---
 nova/compute/manager.py          | 16 +++++++++++-----
 nova/compute/resource_tracker.py | 18 ++++++++++++------
 nova/compute/rpcapi.py           |  8 +++++---
 3 files changed, 28 insertions(+), 14 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 724760350..a957e77cc 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -306,7 +306,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
 class ComputeManager(manager.SchedulerDependentManager):
     """Manages the running instances from creation to destruction."""
 
-    RPC_API_VERSION = '2.18'
+    RPC_API_VERSION = '2.19'
 
     def __init__(self, compute_driver=None, *args, **kwargs):
         """Load configuration options and connect to the hypervisor."""
@@ -576,13 +576,19 @@ class ComputeManager(manager.SchedulerDependentManager):
 
     def _run_instance(self, context, request_spec,
                       filter_properties, requested_networks, injected_files,
-                      admin_password, is_first_time, instance):
+                      admin_password, is_first_time, node, instance):
         """Launch a new instance with specified options."""
         context = context.elevated()
         try:
             self._check_instance_not_already_created(context, instance)
             image_meta = self._check_image_size(context, instance)
+
+            if node is None:
+                node = self.driver.get_available_nodes()[0]
+                LOG.debug(_("No node specified, defaulting to %(node)s") %
+                          locals())
+
             extra_usage_info = {"image_name": image_meta['name']}
             self._start_building(context, instance)
             self._notify_about_instance_usage(
@@ -591,7 +597,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             network_info = None
             bdms = self.db.block_device_mapping_get_all_by_instance(
                 context, instance['uuid'])
-            rt = self._get_resource_tracker(instance.get('node'))
+            rt = self._get_resource_tracker(node)
             try:
                 limits = filter_properties.get('limits', {})
                 with rt.instance_claim(context, instance, limits):
@@ -941,7 +947,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     def run_instance(self, context, instance, request_spec=None,
                      filter_properties=None, requested_networks=None,
                      injected_files=None, admin_password=None,
-                     is_first_time=False):
+                     is_first_time=False, node=None):
 
         if filter_properties is None:
             filter_properties = {}
@@ -952,7 +958,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         def do_run_instance():
             self._run_instance(context, request_spec, filter_properties,
                     requested_networks, injected_files,
-                    admin_password, is_first_time, instance)
+                    admin_password, is_first_time, node, instance)
         do_run_instance()
 
     def _shutdown_instance(self, context, instance, bdms):
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 7bb402916..ac46f31da 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -82,21 +82,26 @@ class ResourceTracker(object):
         """
         if self.disabled:
             # compute_driver doesn't support resource tracking, just
-            # set the 'host' field and continue the build:
-            self._set_instance_host(context, instance_ref)
+            # set the 'host' and node fields and continue the build:
+            self._set_instance_host_and_node(context, instance_ref)
             return claims.NopClaim()
 
-        # sanity check:
+        # sanity checks:
         if instance_ref['host']:
             LOG.warning(_("Host field should not be set on the instance until "
                           "resources have been claimed."),
                         instance=instance_ref)
 
+        if instance_ref['node']:
+            LOG.warning(_("Node field should not be set on the instance "
+                          "until resources have been claimed."),
+                        instance=instance_ref)
+
         claim = claims.Claim(instance_ref, self)
 
         if claim.test(self.compute_node, limits):
 
-            self._set_instance_host(context, instance_ref)
+            self._set_instance_host_and_node(context, instance_ref)
 
             # Mark resources in-use and update stats
             self._update_usage_from_instance(self.compute_node, instance_ref)
@@ -168,12 +173,13 @@ class ResourceTracker(object):
                 'new_instance_type_id': instance_type['id'],
                 'status': 'pre-migrating'})
 
-    def _set_instance_host(self, context, instance_ref):
+    def _set_instance_host_and_node(self, context, instance_ref):
         """Tag the instance as belonging to this host.  This should be done
         while the COMPUTE_RESOURCES_SEMPAHORE is held so the resource claim
         will not be lost if the audit process starts.
         """
-        values = {'host': self.host, 'launched_on': self.host}
+        values = {'host': self.host, 'node': self.nodename,
+                  'launched_on': self.host}
         (old_ref, new_ref) = db.instance_update_and_get_original(context,
             instance_ref['uuid'], values)
         notifications.send_update(context, old_ref, new_ref)
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index bb5e7601d..6f69a3cd8 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -146,6 +146,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
         2.16 - Add instance_type to resize_instance
         2.17 - Add get_backdoor_port()
         2.18 - Add bdms to rebuild_instance
+        2.19 - Add node to run_instance
     '''
 
     #
@@ -474,14 +475,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
     def run_instance(self, ctxt, instance, host, request_spec,
                      filter_properties, requested_networks,
                      injected_files, admin_password,
-                     is_first_time):
+                     is_first_time, node=None):
         instance_p = jsonutils.to_primitive(instance)
         self.cast(ctxt, self.make_msg('run_instance', instance=instance_p,
                 request_spec=request_spec, filter_properties=filter_properties,
                 requested_networks=requested_networks,
                 injected_files=injected_files, admin_password=admin_password,
-                is_first_time=is_first_time),
-                topic=_compute_topic(self.topic, ctxt, host, None))
+                is_first_time=is_first_time, node=node),
+                topic=_compute_topic(self.topic, ctxt, host, None),
+                version='2.19')
 
     def set_admin_password(self, ctxt, instance, new_pass):
         instance_p = jsonutils.to_primitive(instance)
--
cgit
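
For context on how the new argument is consumed, a minimal scheduler-side sketch
(not part of this commit) of a caller passing both the selected host and its
nodename through the 2.19 run_instance() RPC shown above. The class and method
names and the weighted_host/host_state attributes are illustrative assumptions;
only the run_instance() keyword list comes from the patch itself.

# Hypothetical scheduler-side caller (illustration only; the class name,
# _provision_resource, weighted_host and host_state are assumed names).
class FilterSchedulerSketch(object):
    def _provision_resource(self, context, weighted_host, request_spec,
                            filter_properties, requested_networks,
                            injected_files, admin_password, is_first_time,
                            instance):
        # Pass both the chosen host and its hypervisor nodename so the
        # compute manager does not have to fall back to
        # driver.get_available_nodes()[0] when node is None.
        self.compute_rpcapi.run_instance(context, instance=instance,
                host=weighted_host.host_state.host,
                request_spec=request_spec,
                filter_properties=filter_properties,
                requested_networks=requested_networks,
                injected_files=injected_files,
                admin_password=admin_password,
                is_first_time=is_first_time,
                node=weighted_host.host_state.nodename)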