From 093c8200a102891232e2da166830cd59ee133fc4 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 14 Mar 2011 13:32:22 -0500 Subject: committing to share --- nova/compute/api.py | 3 +-- nova/compute/manager.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index c475e3bff..1737565aa 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -184,8 +184,7 @@ class API(base.Base): instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, + instance = dict(launch_index=num, **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d659712ad..ce2fa8713 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -193,16 +193,22 @@ class ComputeManager(manager.Manager): # with the address currently, but I'm leaving it as # a call to ensure that network setup completes. We # will eventually also need to save the address here. + #NOTE(tr3buchet): I don't see why we'd save it here when the network + # manager is saving it. if not FLAGS.stub_network: - address = rpc.call(context, - self.get_network_topic(context), - {"method": "allocate_fixed_ip", + rpc.call(context, self.get_network_topic(context), + {"method": "allocate_fixed_ips", "args": {"instance_id": instance_id, "vpn": is_vpn}}) + rpc.call(context, self.get_network_topic(context), + {"method": "allocate_mac_addresses", + "args": {"instance_id": instance_id}}) self.network_manager.setup_compute_network(context, instance_id) + Log.debug(_("instance addresses: |%s|"), instance_ref['fixed_ips']) + # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, instance_id, -- cgit From 57890776d0d7e9172b1fa056076ce28ae4b34b7b Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 14 Mar 2011 17:54:39 -0500 Subject: added migration to repo --- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ce2fa8713..076831c0b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -204,9 +204,9 @@ class ComputeManager(manager.Manager): {"method": "allocate_mac_addresses", "args": {"instance_id": instance_id}}) - self.network_manager.setup_compute_network(context, - instance_id) - + nw_info = rpc.call(context, self.get_network_topic(context), + {"method": "allocate_for_instance", + "args": {"instance_id": instance_id}}) Log.debug(_("instance addresses: |%s|"), instance_ref['fixed_ips']) # TODO(vish) check to make sure the availability zone matches -- cgit From 1845c5df145251f1e90709a91cc02ee5ec787e2f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 11 Apr 2011 14:16:30 -0500 Subject: network manager changes, compute changes, various other --- nova/compute/manager.py | 83 ++++++++++++++++--------------------------------- 1 file changed, 26 insertions(+), 57 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2c5d958e6..f5bcaf603 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -200,9 +200,9 @@ class ComputeManager(manager.SchedulerDependentManager): def run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" 
context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) - instance_ref.injected_files = kwargs.get('injected_files', []) - if instance_ref['name'] in self.driver.list_instances(): + instance = self.db.instance_get(context, instance_id) + instance.injected_files = kwargs.get('injected_files', []) + if instance['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, context=context) @@ -215,26 +215,13 @@ class ComputeManager(manager.SchedulerDependentManager): power_state.NOSTATE, 'networking') - is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id - # NOTE(vish): This could be a cast because we don't do anything - # with the address currently, but I'm leaving it as - # a call to ensure that network setup completes. We - # will eventually also need to save the address here. - #NOTE(tr3buchet): I don't see why we'd save it here when the network - # manager is saving it. + is_vpn = instance['image_id'] == FLAGS.vpn_image_id if not FLAGS.stub_network: - rpc.call(context, self.get_network_topic(context), - {"method": "allocate_fixed_ips", - "args": {"instance_id": instance_id, - "vpn": is_vpn}}) - rpc.call(context, self.get_network_topic(context), - {"method": "allocate_mac_addresses", - "args": {"instance_id": instance_id}}) - - nw_info = rpc.call(context, self.get_network_topic(context), - {"method": "allocate_for_instance", - "args": {"instance_id": instance_id}}) - Log.debug(_("instance addresses: |%s|"), instance_ref['fixed_ips']) + network_info = rpc.call(context, self.get_network_topic(context), + {"method": "allocate_for_instance", + "args": {"instance_id": instance_id, + "vpn": is_vpn}}) + Log.debug(_("instance network_info: |%s|"), network_info) # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, @@ -243,7 +230,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'spawning') try: - self.driver.spawn(instance_ref) + self.driver.spawn(instance, network_info) now = datetime.datetime.utcnow() self.db.instance_update(context, instance_id, @@ -263,45 +250,22 @@ class ComputeManager(manager.SchedulerDependentManager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) LOG.audit(_("Terminating instance %s"), instance_id, context=context) - fixed_ip = instance_ref.get('fixed_ip') - if not FLAGS.stub_network and fixed_ip: - floating_ips = fixed_ip.get('floating_ips') or [] - for floating_ip in floating_ips: - address = floating_ip['address'] - LOG.debug("Disassociating address %s", address, - context=context) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. - network_topic = self.db.queue_get_for(context, - FLAGS.network_topic, - floating_ip['host']) - rpc.cast(context, - network_topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": address}}) - - address = fixed_ip['address'] - if address: - LOG.debug(_("Deallocating address %s"), address, - context=context) - # NOTE(vish): Currently, nothing needs to be done on the - # network node until release. If this changes, - # we will need to cast here. 
- self.network_manager.deallocate_fixed_ip(context.elevated(), - address) - - volumes = instance_ref.get('volumes') or [] + if not FLAGS.stub_network: + rpc.call(context, self.get_network_topic(context), + {"method": "allocate_for_instance", + "args": {"instance_id": instance_id}}) + + volumes = instance.get('volumes') or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) - if instance_ref['state'] == power_state.SHUTOFF: + if instance['state'] == power_state.SHUTOFF: self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) - self.driver.destroy(instance_ref) + self.driver.destroy(instance) # TODO(ja): should we keep it in a terminated state for a bit? self.db.instance_destroy(context, instance_id) @@ -712,10 +676,15 @@ class ComputeManager(manager.SchedulerDependentManager): """ context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: inject network info'), instance_id, context=context) - self.driver.inject_network_info(instance_ref) + instance = self.db.instance_get(context, instance) + network_info = rpc.call(context, self.get_network_topic(context), + {"method": "get_instance_nw_info", + "args": {"instance": instance}}) + Log.debug(_("network_info: |%s|"), network_info) + + self.driver.inject_network_info(instance, network_info=network_info) @exception.wrap_exception def get_console_output(self, context, instance_id): -- cgit From 952528a65cc73fdf45f3ff2e2bdfaa68ce278a16 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Mon, 11 Apr 2011 15:26:44 -0500 Subject: typo --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6c5bfba3d..b5799dcc5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -249,7 +249,7 @@ class ComputeManager(manager.SchedulerDependentManager): if not FLAGS.stub_network: rpc.call(context, self.get_network_topic(context), - {"method": "allocate_for_instance", + {"method": "deallocate_for_instance", "args": {"instance_id": instance_id}}) volumes = instance.get('volumes') or [] -- cgit From b93abf52587da04f8079be9be1ed0f9a473a9613 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 21 Apr 2011 11:48:47 -0500 Subject: commit to push for testing --- nova/compute/api.py | 26 ++++++++++++++++++++++++-- nova/compute/manager.py | 37 +++++++++++++++++++++---------------- 2 files changed, 45 insertions(+), 18 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index b376971dc..b700c5005 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -461,7 +461,7 @@ class API(base.Base): params = {} if not host: instance = self.get(context, instance_id) - host = instance["host"] + host = instance['host'] queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) params['instance_id'] = instance_id kwargs = {'method': method, 'args': params} @@ -697,10 +697,32 @@ class API(base.Base): return instance def associate_floating_ip(self, context, instance_id, address): + """makes calls to network_api to associate_floating_ip + + address is a string floating ip address + """ instance = self.get(context, instance_id) + + # TODO(tr3buchet): currently network_info doesn't contain floating IPs + # in its info, if this changes, the next few lines will need to + # accomodate the info containing floating as well as fixed ip addresses + 
fixed_ip_addrs = [] + for (network, info) in self.network_api.get_instance_nw_info(context, + instance): + fixed_ip_addrs.extend([ip_dict.ip for ip_dict in info['ips']]) + + # TODO(tr3buchet): this will associate the floating IP with the first + # fixed_ip (lowest id) an instance has. This should be changed to + # support specifying a particular fixed_ip if multiple exist. + if not fixed_ip_addrs: + msg = _("instance |%s| has no fixed_ips. " + "unable to associate floating ip") % instance_id + raise exception.ApiError(msg) + if len(fixed_ip_addrs) > 1: + LOG.warning(_("multiple fixed_ips exist, using the first")) self.network_api.associate_floating_ip(context, floating_ip=address, - fixed_ip=instance['fixed_ip']) + fixed_ip=fixed_ip_addrs[0]) def get_instance_metadata(self, context, instance_id): """Get all metadata associated with an instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b5799dcc5..a95ca3f6e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -43,6 +43,7 @@ import socket import sys import tempfile import functools +import pickle from eventlet import greenthread @@ -50,6 +51,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import manager +from nova import network from nova import rpc from nova import utils from nova.compute import power_state @@ -130,6 +132,7 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.error(_("Unable to load the virtualization driver: %s") % (e)) sys.exit(1) + self.network_api = network.API() self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) super(ComputeManager, self).__init__(service_name="compute", @@ -211,11 +214,16 @@ class ComputeManager(manager.SchedulerDependentManager): is_vpn = instance['image_id'] == FLAGS.vpn_image_id if not FLAGS.stub_network: - network_info = rpc.call(context, self.get_network_topic(context), - {"method": "allocate_for_instance", - "args": {"instance_id": instance_id, - "vpn": is_vpn}}) - Log.debug(_("instance network_info: |%s|"), network_info) + network_info = self.network_api.allocate_for_instance(context, + instance, + vpn=is_vpn) + LOG.debug(_("instance network_info: |%s|"), network_info) + else: + # TODO(tr3buchet) not really sure how this should be handled. + # virt requires network_info to be passed in but stub_network + # is enabled. 
Setting to [] for now will cause virt to skip + # all vif creation and network injection, maybe this is correct + network_info = [] # TODO(vish) check to make sure the availability zone matches self.db.instance_set_state(context, @@ -248,9 +256,7 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.audit(_("Terminating instance %s"), instance_id, context=context) if not FLAGS.stub_network: - rpc.call(context, self.get_network_topic(context), - {"method": "deallocate_for_instance", - "args": {"instance_id": instance_id}}) + self.network_api.deallocate_for_instance(context, instance) volumes = instance.get('volumes') or [] for volume in volumes: @@ -672,13 +678,12 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() LOG.debug(_('instance %s: inject network info'), instance_id, context=context) - instance = self.db.instance_get(context, instance) - network_info = rpc.call(context, self.get_network_topic(context), - {"method": "get_instance_nw_info", - "args": {"instance": instance}}) - Log.debug(_("network_info: |%s|"), network_info) + instance = self.db.instance_get(context, instance_id) + network_info = self.network_api.get_instance_nw_info(context, + instance) + LOG.debug(_("network_info to inject: |%s|"), network_info) - self.driver.inject_network_info(instance, network_info=network_info) + self.driver.inject_network_info(instance, network_info) @exception.wrap_exception def get_console_output(self, context, instance_id): @@ -840,8 +845,8 @@ class ComputeManager(manager.SchedulerDependentManager): ec2_id = instance_ref['hostname'] # Getting fixed ips - fixed_ip = self.db.instance_get_fixed_address(context, instance_id) - if not fixed_ip: + fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id) + if not fixed_ips: msg = _("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.") raise exception.NotFound(msg % locals()) -- cgit From f29be40bffd0b4e2b26ce06d81090d5918e84539 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 21 Apr 2011 11:54:20 -0500 Subject: altered imports --- nova/compute/manager.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index a95ca3f6e..82cf8ea70 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -43,7 +43,6 @@ import socket import sys import tempfile import functools -import pickle from eventlet import greenthread -- cgit From 521b6b36b0927d9c0b674db0e611cdb6f3851a08 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 21 Apr 2011 13:38:23 -0500 Subject: moved get_network_topic to network.api --- nova/compute/manager.py | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 82cf8ea70..afa98a617 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -163,20 +163,6 @@ class ComputeManager(manager.SchedulerDependentManager): FLAGS.console_topic, FLAGS.console_host) - def get_network_topic(self, context, **kwargs): - """Retrieves the network host for a project on this host""" - # TODO(vish): This method should be memoized. This will make - # the call to get_network_host cheaper, so that - # it can pas messages instead of checking the db - # locally. 
- if FLAGS.stub_network: - host = FLAGS.network_host - else: - host = self.network_manager.get_network_host(context) - return self.db.queue_get_for(context, - FLAGS.network_topic, - host) - def get_console_pool_info(self, context, console_type): return self.driver.get_console_pool_info(console_type) -- cgit From 42dfb994083449f4d8b395af413ee1a195f3a8ef Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 21 Apr 2011 13:42:23 -0500 Subject: forgot to save --- nova/compute/api.py | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index b700c5005..082fb9935 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -68,23 +68,6 @@ class API(base.Base): self.hostname_factory = hostname_factory super(API, self).__init__(**kwargs) - def get_network_topic(self, context, instance_id): - """Get the network topic for an instance.""" - try: - instance = self.get(context, instance_id) - except exception.NotFound: - LOG.warning(_("Instance %d was not found in get_network_topic"), - instance_id) - raise - - host = instance['host'] - if not host: - raise exception.Error(_("Instance %d has no host") % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - def _check_injected_file_quota(self, context, injected_files): """ Enforce quota limits on injected files -- cgit From 9c0ffe7281ab6ec0acb3ef007b7c955d83007bd0 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 12 May 2011 14:13:25 -0500 Subject: misc related network manager refactor and cleanup --- nova/compute/manager.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index a92a160f8..03aee0204 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -206,6 +206,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance, vpn=is_vpn) LOG.debug(_("instance network_info: |%s|"), network_info) + self.network_manager.setup_compute_network(context, instance_id) else: # TODO(tr3buchet) not really sure how this should be handled. 
# virt requires network_info to be passed in but stub_network -- cgit From 1f8ef6907a5e1c1f88b0fc9f28084dbc8014274f Mon Sep 17 00:00:00 2001 From: Jason Kölker Date: Tue, 24 May 2011 12:43:25 -0500 Subject: we don't get the network in a tuples anymore --- nova/compute/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index c7ee0a119..8000d1804 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -694,8 +694,8 @@ class API(base.Base): # in its info, if this changes, the next few lines will need to # accomodate the info containing floating as well as fixed ip addresses fixed_ip_addrs = [] - for (network, info) in self.network_api.get_instance_nw_info(context, - instance): + for info in self.network_api.get_instance_nw_info(context, + instance): fixed_ip_addrs.extend([ip_dict.ip for ip_dict in info['ips']]) # TODO(tr3buchet): this will associate the floating IP with the first -- cgit From 416f6eef591390e6a53d9aae71ca8fd65a098129 Mon Sep 17 00:00:00 2001 From: Jason Kölker Date: Tue, 24 May 2011 13:21:53 -0500 Subject: its a dict, not a class --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 8000d1804..149d1b7d3 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -696,7 +696,7 @@ class API(base.Base): fixed_ip_addrs = [] for info in self.network_api.get_instance_nw_info(context, instance): - fixed_ip_addrs.extend([ip_dict.ip for ip_dict in info['ips']]) + fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in info['ips']]) # TODO(tr3buchet): this will associate the floating IP with the first # fixed_ip (lowest id) an instance has. This should be changed to -- cgit From 7aadbbc673ed21748f4b371d9e5a1f80f6884b9b Mon Sep 17 00:00:00 2001 From: Jason Kölker Date: Wed, 25 May 2011 11:31:16 -0500 Subject: we have a list of tuples, not a list of dicts --- nova/compute/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index b676c2008..69857e8e0 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -754,7 +754,8 @@ class API(base.Base): fixed_ip_addrs = [] for info in self.network_api.get_instance_nw_info(context, instance): - fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in info['ips']]) + ips = info[1]['ips'] + fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips]) # TODO(tr3buchet): this will associate the floating IP with the first # fixed_ip (lowest id) an instance has. 
This should be changed to -- cgit From b3b2863a8f76f87a601d0b9fe7cc523ca718310a Mon Sep 17 00:00:00 2001 From: Cerberus Date: Wed, 25 May 2011 16:37:39 -0500 Subject: Fixing divergence --- nova/compute/manager.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c8893d5ba..0a44b4eb0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1000,9 +1000,10 @@ class ComputeManager(manager.SchedulerDependentManager): {'host': dest}) except exception.NotFound: LOG.info(_('No floating_ip is found for %s.'), i_name) - except: - LOG.error(_("Live migration: Unexpected error:" - "%s cannot inherit floating ip..") % i_name) + except Exception, e: + LOG.error(_("Live migration: Unexpected error: " + "%(i_name)s cannot inherit floating " + "ip.\n%(e)s") % (locals())) # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) -- cgit From e467ca61f02d8a0adc50578db1d4ae969a1143f4 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 1 Jun 2011 15:01:47 -0500 Subject: small fixes --- nova/compute/api.py | 17 +++++++++++++++++ nova/compute/manager.py | 8 ++++++++ 2 files changed, 25 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 29650e782..6eb302331 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -604,6 +604,23 @@ class API(base.Base): "instance_id": instance_id, "flavor_id": flavor_id}}) + @scheduler_api.reroute_compute("add_fixed_ip") + def add_fixed_ip(self, context, instance_id, network_id): + """add fixed_ip from specified network to given instance""" + self._cast_compute_message('add_fixed_ip_to_instance', context, + instance_id, + network_id) + + @scheduler_api.reroute_compute("add_network_to_project") + def add_network_to_project(self, context, project_id): + """force adds a network to the project""" + # this will raise if zone doesn't know about project so the decorator + # can catch it and pass it down + self.db.project_get(context, project_id) + + # didn't raise so this is the correct zone + self.network_api.add_network_to_project(context, project_id) + @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): """Pause the given instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c8893d5ba..bed20ba01 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -589,6 +589,14 @@ class ComputeManager(manager.SchedulerDependentManager): self.db.migration_update(context, migration_id, {'status': 'finished', }) + @exception.wrap_exception + @checks_instance_lock + def add_fixed_ip_to_instance(self, context, instance_id, network_id): + """calls network_api to add new fixed_ip to instance + only here because of checks_instance_lock""" + self.network_api.add_fixed_ip_to_instance(context, instance_id, + network_id) + @exception.wrap_exception @checks_instance_lock def pause_instance(self, context, instance_id): -- cgit From eee29c8142e530c801d655cf27858297946010ec Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 2 Jun 2011 13:21:35 -0500 Subject: some comment docstring modifications --- nova/compute/api.py | 2 +- nova/compute/manager.py | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 6eb302331..634f08914 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -611,7 +611,7 @@ class API(base.Base): instance_id, 
network_id) - @scheduler_api.reroute_compute("add_network_to_project") + #TODO(tr3buchet): how to run this in the correct zone? def add_network_to_project(self, context, project_id): """force adds a network to the project""" # this will raise if zone doesn't know about project so the decorator diff --git a/nova/compute/manager.py b/nova/compute/manager.py index bed20ba01..df94c92ad 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -132,7 +132,6 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_api = network.API() self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) - self.network_api = network.API() self._last_host_check = 0 super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) @@ -592,10 +591,13 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def add_fixed_ip_to_instance(self, context, instance_id, network_id): - """calls network_api to add new fixed_ip to instance - only here because of checks_instance_lock""" + """calls network_api to add new fixed_ip to instance""" self.network_api.add_fixed_ip_to_instance(context, instance_id, network_id) + instance = self.db.instance_get(context, instance_id) + network_info = self.network_api.get_instance_nw_info(context, instance) + self.driver.inject_network_info(instance, network_info) + self.driver.reset_networking(instance) @exception.wrap_exception @checks_instance_lock -- cgit From 0438855659d89133e588dd4201956a901ed85787 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 8 Jun 2011 12:41:09 -0500 Subject: removed network_info shims in vmops --- nova/compute/manager.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index e3cfc8e0e..60791b5e7 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -605,13 +605,13 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def add_fixed_ip_to_instance(self, context, instance_id, network_id): - """calls network_api to add new fixed_ip to instance""" + """calls network_api to add new fixed_ip to instance + then injects the new network info and resets instance networking + """ self.network_api.add_fixed_ip_to_instance(context, instance_id, network_id) - instance = self.db.instance_get(context, instance_id) - network_info = self.network_api.get_instance_nw_info(context, instance) - self.driver.inject_network_info(instance, network_info) - self.driver.reset_networking(instance) + self.inject_network_info(context, instance_id) + self.reset_network(context, instance_id) @exception.wrap_exception @checks_instance_lock @@ -717,16 +717,14 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def reset_network(self, context, instance_id): """Reset networking on the given instance.""" - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: reset network'), instance_id, context=context) - self.driver.reset_network(instance_ref) + self.driver.reset_network(instance) @checks_instance_lock def inject_network_info(self, context, instance_id): """Inject network info for the given instance.""" - context = context.elevated() LOG.debug(_('instance %s: inject network info'), instance_id, 
context=context) instance = self.db.instance_get(context, instance_id) -- cgit From b425aa0c49aba5d52250d3b7d0cd282464a32141 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 10 Jun 2011 14:57:02 -0500 Subject: misc argument alterations --- nova/compute/api.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index d366d96eb..2a2dc6f0e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -216,8 +216,7 @@ class API(base.Base): groups, MAC address, etc). This will called by create() in the majority of situations, but all-at-once style Schedulers may initiate the call.""" - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, + instance = dict(launch_index=num, **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] -- cgit From d632e9883ef2fecb74b3bfdc62b8871a2c74ff93 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 15 Jun 2011 11:29:07 -0500 Subject: updated finish_resize to accept network_info, updated compute and tests in accordance --- nova/compute/manager.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index cf9a97b4c..d29c4348c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -597,7 +597,9 @@ class ComputeManager(manager.SchedulerDependentManager): # reload the updated instance ref # FIXME(mdietz): is there reload functionality? instance_ref = self.db.instance_get(context, instance_id) - self.driver.finish_resize(instance_ref, disk_info) + network_info = self.network_api.get_instance_nw_info(context, + instance) + self.driver.finish_resize(instance_ref, disk_info, network_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) -- cgit From 2c59140ba50370e069b233aff74bd26a6af4c093 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 15 Jun 2011 11:34:30 -0500 Subject: typo --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d29c4348c..d08286224 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -596,10 +596,10 @@ class ComputeManager(manager.SchedulerDependentManager): # reload the updated instance ref # FIXME(mdietz): is there reload functionality? - instance_ref = self.db.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) network_info = self.network_api.get_instance_nw_info(context, instance) - self.driver.finish_resize(instance_ref, disk_info, network_info) + self.driver.finish_resize(instance, disk_info, network_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) -- cgit From 8bd0296224b70e318e208a4570b4acaa599f62c8 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Fri, 17 Jun 2011 18:26:31 +0400 Subject: Made hostname independent from ec2 id. Add generation of hostnames based on display name. 
--- nova/compute/api.py | 24 +++++++++++++++++++++--- nova/compute/manager.py | 6 +++--- 2 files changed, 24 insertions(+), 6 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index e2c4cf8d7..844192404 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -47,9 +47,25 @@ flags.DEFINE_integer('find_host_timeout', 30, 'Timeout after NN seconds when looking for a host.') -def generate_default_hostname(instance_id): +def generate_default_hostname(instance): """Default function to generate a hostname given an instance reference.""" - return str(instance_id) + display_name = instance['display_name'] + if display_name is None: + return 'server_%d' % (instance['id'],) + table = '' + deletions = '' + for i in xrange(256): + c = chr(i) + if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'): + table += c + elif c == ' ': + table += '_' + elif ('A' <= c <= 'Z'): + table += c.lower() + else: + table += '\0' + deletions += c + return display_name.encode('latin-1', 'ignore').translate(table, deletions) class API(base.Base): @@ -256,10 +272,12 @@ class API(base.Base): security_group_id) # Set sane defaults if not specified - updates = dict(hostname=self.hostname_factory(instance_id)) + updates = {} if (not hasattr(instance, 'display_name') or instance.display_name is None): updates['display_name'] = "Server %s" % instance_id + instance['display_name'] = updates['display_name'] + updates['hostname'] = self.hostname_factory(instance) instance = self.update(context, instance_id, **updates) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 245958de7..e84c434d2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -933,7 +933,7 @@ class ComputeManager(manager.SchedulerDependentManager): # Getting instance info instance_ref = self.db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] + hostname = instance_ref['hostname'] # Getting fixed ips fixed_ip = self.db.instance_get_fixed_address(context, instance_id) @@ -942,7 +942,7 @@ class ComputeManager(manager.SchedulerDependentManager): # If any volume is mounted, prepare here. if not instance_ref['volumes']: - LOG.info(_("%s has no volume."), ec2_id) + LOG.info(_("%s has no volume."), hostname) else: for v in instance_ref['volumes']: self.volume_manager.setup_compute_volume(context, v['id']) @@ -965,7 +965,7 @@ class ComputeManager(manager.SchedulerDependentManager): raise else: LOG.warn(_("setup_compute_network() failed %(cnt)d." - "Retry up to %(max_retry)d for %(ec2_id)s.") + "Retry up to %(max_retry)d for %(hostname)s.") % locals()) time.sleep(1) -- cgit From 749eac4d36ff2f7a855044d677f3cde07451f32a Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Fri, 17 Jun 2011 13:47:28 -0500 Subject: bunch of docstring changes --- nova/compute/api.py | 8 ++++---- nova/compute/manager.py | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index b05db3ca5..6c31a9697 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -716,14 +716,14 @@ class API(base.Base): @scheduler_api.reroute_compute("add_fixed_ip") def add_fixed_ip(self, context, instance_id, network_id): - """add fixed_ip from specified network to given instance""" + """Add fixed_ip from specified network to given instance.""" self._cast_compute_message('add_fixed_ip_to_instance', context, instance_id, network_id) #TODO(tr3buchet): how to run this in the correct zone? 
def add_network_to_project(self, context, project_id): - """force adds a network to the project""" + """Force adds a network to the project.""" # this will raise if zone doesn't know about project so the decorator # can catch it and pass it down self.db.project_get(context, project_id) @@ -873,9 +873,9 @@ class API(base.Base): return instance def associate_floating_ip(self, context, instance_id, address): - """makes calls to network_api to associate_floating_ip + """Makes calls to network_api to associate_floating_ip. - address is a string floating ip address + :param address: is a string floating ip address """ instance = self.get(context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d08286224..2ad0c0d04 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -607,8 +607,9 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def add_fixed_ip_to_instance(self, context, instance_id, network_id): - """calls network_api to add new fixed_ip to instance - then injects the new network info and resets instance networking + """Calls network_api to add new fixed_ip to instance + then injects the new network info and resets instance networking. + """ self.network_api.add_fixed_ip_to_instance(context, instance_id, network_id) -- cgit From 0d426ae8d0fe4e697648e58d1791e1c40b78deab Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 20 Jun 2011 16:56:59 -0700 Subject: fix lp 798361 --- nova/compute/api.py | 55 +++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 27 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index e6cffb6b3..cb73af94c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -200,18 +200,7 @@ class API(base.Base): if ramdisk_id: image_service.show(context, ramdisk_id) - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) if key_data is None and key_name: key_pair = db.key_pair_get(context, context.user_id, key_name) @@ -245,15 +234,19 @@ class API(base.Base): 'os_type': os_type, 'vm_mode': vm_mode} - return (num_instances, base_options, security_groups) + return (num_instances, base_options) def create_db_entry_for_new_instance(self, context, base_options, - security_groups, block_device_mapping, num=1): + security_group, block_device_mapping, num=1): """Create an entry in the DB for this new instance, - including any related table updates (such as security - groups, MAC address, etc). This will called by create() - in the majority of situations, but all-at-once style - Schedulers may initiate the call.""" + including any related table updates (such as security group, + MAC address, etc). + + This will called by create() in the majority of situations, + but create_all_at_once() style Schedulers may initiate the call. + If you are changing this method, be sure to update both + call paths. 
+ """ instance = dict(mac_address=utils.generate_mac(), launch_index=num, **base_options) @@ -261,13 +254,24 @@ class API(base.Base): instance_id = instance['id'] elevated = context.elevated() - if not security_groups: - security_groups = [] + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + for security_group_id in security_groups: self.db.instance_add_security_group(elevated, instance_id, security_group_id) - + + block_device_mapping = block_device_mapping or [] # NOTE(yamahata) # tell vm driver to attach volume at boot time by updating # BlockDeviceMapping @@ -339,12 +343,11 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None): + reservation_id=None, block_device_mapping=None): """Provision the instances by passing the whole request to the Scheduler for execution. Returns a Reservation ID related to the creation of all of these instances.""" - num_instances, base_options, security_groups = \ - self._check_create_parameters( + num_instances, base_options = self._check_create_parameters( context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, @@ -379,8 +382,7 @@ class API(base.Base): Returns a list of instance dicts. """ - num_instances, base_options, security_groups = \ - self._check_create_parameters( + num_instances, base_options = self._check_create_parameters( context, instance_type, image_href, kernel_id, ramdisk_id, min_count, max_count, @@ -390,12 +392,11 @@ class API(base.Base): injected_files, admin_password, zone_blob, reservation_id) - block_device_mapping = block_device_mapping or [] instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = self.create_db_entry_for_new_instance(context, - base_options, security_groups, + base_options, security_group, block_device_mapping, num=num) instances.append(instance) instance_id = instance['id'] -- cgit From 2d74b48984783ae09c2f29bf5c6fa0f81e6d32c2 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 20 Jun 2011 17:21:22 -0700 Subject: trunk merge --- nova/compute/api.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index cb73af94c..0791bab51 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -379,6 +379,9 @@ class API(base.Base): Scheduler drivers, but may remove the effectiveness of the more complicated drivers. + NOTE: If you change this method, be sure to change + create_all_at_once() at the same time! + Returns a list of instance dicts. """ -- cgit From d99b17895747959e332e5645aedd0a2ddc0e21da Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Tue, 21 Jun 2011 12:19:01 -0700 Subject: pep8 --- nova/compute/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 0791bab51..1c001a8fc 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -241,7 +241,7 @@ class API(base.Base): """Create an entry in the DB for this new instance, including any related table updates (such as security group, MAC address, etc). 
- + This will called by create() in the majority of situations, but create_all_at_once() style Schedulers may initiate the call. If you are changing this method, be sure to update both @@ -270,7 +270,7 @@ class API(base.Base): self.db.instance_add_security_group(elevated, instance_id, security_group_id) - + block_device_mapping = block_device_mapping or [] # NOTE(yamahata) # tell vm driver to attach volume at boot time by updating -- cgit From 75a87df739effe840e6cb39c976002e99b49c796 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 22 Jun 2011 13:31:28 -0500 Subject: Round 1 of backup with rotation. --- nova/compute/api.py | 32 ++++++++++++++++++++++++++++++-- nova/compute/manager.py | 20 ++++++++++++++++++-- 2 files changed, 48 insertions(+), 4 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index a7ea88d51..365aa1c5d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -701,18 +701,46 @@ class API(base.Base): raise exception.Error(_("Unable to find host for Instance %s") % instance_id) + def backup(self, context, instance_id, backup_type, rotation): + """Backup the given instance + + instance_id - int - id representing the instance + backup_type - str - whether it's 'daily' or 'weekly' + rotation - int - number of backups to keep around + """ + name = backup_type # daily backups are called 'daily' + recv_meta = self._snapshot(context, instance_id, name, backup_type, + rotation=rotation) + return recv_meta + def snapshot(self, context, instance_id, name): """Snapshot the given instance. :returns: A dict containing image metadata """ - properties = {'instance_id': str(instance_id), + return self._snapshot(context, instance_id, name, 'snapshot') + + def _snapshot(self, context, instance_id, name, image_type, rotation=None): + """Snapshot an instance on this host. + + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: string for name of the snapshot + :param image_type: snapshot | daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + """ + instance = db.api.instance_get(context, instance_id) + properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), 'image_state': 'creating'} + if image_type != 'snapshot': + properties['backup_type'] = image_type sent_meta = {'name': name, 'is_public': False, 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) - params = {'image_id': recv_meta['id']} + params = {'image_id': recv_meta['id'], 'image_type': image_type, + 'rotation': rotation} self._cast_compute_message('snapshot_instance', context, instance_id, params=params) return recv_meta diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4e006e677..bc6981c58 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -473,8 +473,17 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id) @exception.wrap_exception - def snapshot_instance(self, context, instance_id, image_id): - """Snapshot an instance on this host.""" + def snapshot_instance(self, context, instance_id, image_id, + image_type='snapshot', rotation=None): + """Snapshot an instance on this host. 
+ + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param image_id: glance.db.sqlalchemy.models.Image.Id + :param image_type: snapshot | daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -493,6 +502,13 @@ class ComputeManager(manager.SchedulerDependentManager): 'expected: %(running)s)') % locals()) self.driver.snapshot(instance_ref, image_id) + if rotation: + self.rotate_backups(context, instance_id, image_type, rotation) + + def rotate_backups(self, context, instance_id, image_type, rotation): + """ + """ + pass @exception.wrap_exception @checks_instance_lock -- cgit From 145ee4a958e97759bc4a516bda758b774761a24f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 22 Jun 2011 15:35:25 -0500 Subject: fixed error --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1894e9592..8021154eb 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -301,7 +301,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) try: - self.driver.spawn(instance_ref, network_info, block_device_mapping) + self.driver.spawn(instance, network_info, block_device_mapping) except Exception as ex: # pylint: disable=W0702 msg = _("Instance '%(instance_id)s' failed to spawn. Is " "virtualization enabled in the BIOS? Details: " -- cgit From 1f99e500a99a4d66639f04f2c723058c4d1dfc1d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 22 Jun 2011 13:45:24 -0700 Subject: Check API request for min_count/max_count for number of instances to build --- nova/compute/api.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index a7ea88d51..44e11d187 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -143,7 +143,7 @@ class API(base.Base): def _check_create_parameters(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, @@ -154,6 +154,10 @@ class API(base.Base): if not instance_type: instance_type = instance_types.get_default_instance_type() + if not min_count: + min_count = 1 + if not max_count: + max_count = min_count num_instances = quota.allowed_instances(context, max_count, instance_type) @@ -338,7 +342,7 @@ class API(base.Base): def create_all_at_once(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, @@ -368,7 +372,7 @@ class API(base.Base): def create(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, -- cgit From ab2a77d0c6f738fe70b5d5a77fa7f97bf1f1f88b Mon Sep 17 00:00:00 2001 
From: Rick Harris Date: Wed, 22 Jun 2011 16:14:01 -0500 Subject: Adding backup rotation --- nova/compute/api.py | 7 +++---- nova/compute/manager.py | 40 ++++++++++++++++++++++++++++++++++------ 2 files changed, 37 insertions(+), 10 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 365aa1c5d..c0cb2e18a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -722,7 +722,7 @@ class API(base.Base): def _snapshot(self, context, instance_id, name, image_type, rotation=None): """Snapshot an instance on this host. - + :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param name: string for name of the snapshot @@ -733,9 +733,8 @@ class API(base.Base): instance = db.api.instance_get(context, instance_id) properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), - 'image_state': 'creating'} - if image_type != 'snapshot': - properties['backup_type'] = image_type + 'image_state': 'creating', + 'image_type': image_type} sent_meta = {'name': name, 'is_public': False, 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index bc6981c58..44abd5d89 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -43,6 +43,7 @@ import time import functools from eventlet import greenthread +from operator import itemgetter from nova import exception from nova import flags @@ -476,7 +477,7 @@ class ComputeManager(manager.SchedulerDependentManager): def snapshot_instance(self, context, instance_id, image_id, image_type='snapshot', rotation=None): """Snapshot an instance on this host. - + :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param image_id: glance.db.sqlalchemy.models.Image.Id @@ -502,13 +503,40 @@ class ComputeManager(manager.SchedulerDependentManager): 'expected: %(running)s)') % locals()) self.driver.snapshot(instance_ref, image_id) - if rotation: - self.rotate_backups(context, instance_id, image_type, rotation) + if rotation and image_type == 'snapshot': + raise exception.ImageRotationNotAllowed + elif rotation: + instance_uuid = instance_ref['uuid'] + self.rotate_backups(context, instance_uuid, image_type, rotation) - def rotate_backups(self, context, instance_id, image_type, rotation): - """ + def rotate_backups(self, context, instance_uuid, image_type, rotation): + """Delete excess backups associated to an instance. + + Instances are allowed a fixed number of backups (the rotation number); + this method deletes the oldest backups that exceed the rotation + threshold. 
+ + :param context: security context + :param instance_uuid: string representing uuid of instance + :param image_type: snapshot | daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) """ - pass + image_service = nova.image.get_default_image_service() + filters = {'property-image-type': image_type, + 'property-instance-uuid': instance_uuid} + images = image_service.detail(context, filters=filters) + if len(images) > rotation: + # Sort oldest (by created_at) to end of list + images.sort(key=itemgetter('created_at'), reverse=True) + + # NOTE(sirp): this deletes all backups that exceed the rotation + # limit + excess = len(images) - rotation + for i in xrange(excess): + image = images.pop() + image_id = image['id'] + image_service.delete(context, image_id) @exception.wrap_exception @checks_instance_lock -- cgit From 61da39ecfefe441d352e72c99884157c5df8173e Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 22 Jun 2011 17:36:09 -0500 Subject: better debug statement around associating floating ips when multiple fixed_ips exist --- nova/compute/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index c76a00d38..9f4d4899f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -978,7 +978,8 @@ class API(base.Base): "unable to associate floating ip") % instance_id raise exception.ApiError(msg) if len(fixed_ip_addrs) > 1: - LOG.warning(_("multiple fixed_ips exist, using the first")) + LOG.warning(_("multiple fixed_ips exist, using the first: %s"), + fixed_ip_addrs[0]) self.network_api.associate_floating_ip(context, floating_ip=address, fixed_ip=fixed_ip_addrs[0]) -- cgit From b9a861d72f1a98510dd4b68e547b434388ab9a64 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 23 Jun 2011 05:20:50 -0700 Subject: add support for compute_api.get_all() recursing zones for more than just reservation_id --- nova/compute/api.py | 66 +++++++++++++++++++++++++++-------------------------- 1 file changed, 34 insertions(+), 32 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index a7ea88d51..9db83d65f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -603,50 +603,52 @@ class API(base.Base): """ return self.get(context, instance_id) - def get_all_across_zones(self, context, reservation_id): - """Get all instances with this reservation_id, across - all available Zones (if any). - """ - context = context.elevated() - instances = self.db.instance_get_all_by_reservation( - context, reservation_id) - - children = scheduler_api.call_zone_method(context, "list", - novaclient_collection_name="servers", - reservation_id=reservation_id) - - for zone, servers in children: - for server in servers: - # Results are ready to send to user. No need to scrub. - server._info['_is_precooked'] = True - instances.append(server._info) - return instances - def get_all(self, context, project_id=None, reservation_id=None, - fixed_ip=None): + fixed_ip=None, recurse_zones=False): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. 
""" - if reservation_id is not None: - return self.get_all_across_zones(context, reservation_id) + admin_context = context.elevated() - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - - if project_id or not context.is_admin: + if reservation_id is not None: + recurse_zones = True + instances = self.db.instance_get_all_by_reservation( + context, reservation_id) + elif fixed_ip is not None: + instances = self.db.fixed_ip_get_instance(context, fixed_ip) + elif project_id or not context.is_admin: if not context.project: - return self.db.instance_get_all_by_user( + instances = self.db.instance_get_all_by_user( context, context.user_id) + else: + if project_id is None: + project_id = context.project_id + instances = self.db.instance_get_all_by_project( + context, project_id) + else: + instances = self.db.instance_get_all(context) + + if not isinstance(instances, list): + instances = [instances] - if project_id is None: - project_id = context.project_id + if not recurse_zones: + return instances - return self.db.instance_get_all_by_project( - context, project_id) + children = scheduler_api.call_zone_method(admin_context, "list", + novaclient_collection_name="servers", + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip + recurse_zones=True) - return self.db.instance_get_all(context) + for zone, servers in children: + for server in servers: + # Results are ready to send to user. No need to scrub. + server._info['_is_precooked'] = True + instances.append(server._info) + return instances def _cast_compute_message(self, method, context, instance_id, host=None, params=None): -- cgit From 1aa7e746d5918f2a664da1937183b66fe31f6bd4 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 23 Jun 2011 05:35:04 -0700 Subject: typo --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 9db83d65f..1b3997db7 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -640,7 +640,7 @@ class API(base.Base): novaclient_collection_name="servers", reservation_id=reservation_id, project_id=project_id, - fixed_ip=fixed_ip + fixed_ip=fixed_ip, recurse_zones=True) for zone, servers in children: -- cgit From 07404e266a4a6b690c62624a9a5e47d60cab7d5b Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 23 Jun 2011 06:33:25 -0700 Subject: fixes for recurse_zones and None instances with compute's get_all --- nova/compute/api.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 1b3997db7..e31edf7bb 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -610,7 +610,6 @@ class API(base.Base): If there is no filter and the context is an admin, it will retreive all instances in the system. 
""" - admin_context = context.elevated() if reservation_id is not None: recurse_zones = True @@ -630,12 +629,15 @@ class API(base.Base): else: instances = self.db.instance_get_all(context) - if not isinstance(instances, list): + if instances is None: + instances = [] + elif not isinstance(instances, list): instances = [instances] if not recurse_zones: return instances + admin_context = context.elevated() children = scheduler_api.call_zone_method(admin_context, "list", novaclient_collection_name="servers", reservation_id=reservation_id, -- cgit From b637dee5a5c48f86f6b8b12b3b374344b4ffc5b7 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Thu, 23 Jun 2011 07:15:20 -0700 Subject: handle errors for listing an instance by IP address --- nova/compute/api.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 02963068a..31333ad18 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -620,7 +620,12 @@ class API(base.Base): instances = self.db.instance_get_all_by_reservation( context, reservation_id) elif fixed_ip is not None: - instances = self.db.fixed_ip_get_instance(context, fixed_ip) + try: + instances = self.db.fixed_ip_get_instance(context, fixed_ip) + except exception.FloatingIpNotFound, e: + if not recurse_zones: + raise + instances = None elif project_id or not context.is_admin: if not context.project: instances = self.db.instance_get_all_by_user( -- cgit From 63a9216ecbaab20fc7dfb82afb9fe0e2f3fbded4 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Jun 2011 15:35:26 -0500 Subject: Adding missing import. --- nova/compute/manager.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 44abd5d89..3c849286e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -47,6 +47,7 @@ from operator import itemgetter from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import manager from nova import network -- cgit From 2028222a5ed47dc82b49f51969d237c4eece50e7 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Jun 2011 16:17:54 -0500 Subject: Fixed filter property and added logging. 
--- nova/compute/manager.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3c849286e..d0ca1ff0d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -524,9 +524,10 @@ class ComputeManager(manager.SchedulerDependentManager): None if rotation shouldn't be used (as in the case of snapshots) """ image_service = nova.image.get_default_image_service() - filters = {'property-image-type': image_type, - 'property-instance-uuid': instance_uuid} + filters = {'property-image_type': image_type, + 'property-instance_uuid': instance_uuid} images = image_service.detail(context, filters=filters) + LOG.debug(_("Found %d images (rotation: %d)" % (len(images), rotation))) if len(images) > rotation: # Sort oldest (by created_at) to end of list images.sort(key=itemgetter('created_at'), reverse=True) @@ -534,9 +535,11 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(sirp): this deletes all backups that exceed the rotation # limit excess = len(images) - rotation + LOG.debug(_("Rotating out %d backups" % excess)) for i in xrange(excess): image = images.pop() image_id = image['id'] + LOG.debug(_("Deleting image %d" % image_id)) image_service.delete(context, image_id) @exception.wrap_exception -- cgit From e3c1a6742b16add04d76631b9dbd4f2ef016e0b3 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Jun 2011 16:19:08 -0500 Subject: PEP8 cleanup. --- nova/compute/manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d0ca1ff0d..4bd7d434e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -527,7 +527,8 @@ class ComputeManager(manager.SchedulerDependentManager): filters = {'property-image_type': image_type, 'property-instance_uuid': instance_uuid} images = image_service.detail(context, filters=filters) - LOG.debug(_("Found %d images (rotation: %d)" % (len(images), rotation))) + LOG.debug(_("Found %d images (rotation: %d)" % + (len(images), rotation))) if len(images) > rotation: # Sort oldest (by created_at) to end of list images.sort(key=itemgetter('created_at'), reverse=True) -- cgit From a045cd5fdd00b3e52f46181017077146abe8df9f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 23 Jun 2011 16:54:28 -0500 Subject: Fixed syntax errors. --- nova/compute/manager.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4bd7d434e..ca66d0387 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -527,9 +527,10 @@ class ComputeManager(manager.SchedulerDependentManager): filters = {'property-image_type': image_type, 'property-instance_uuid': instance_uuid} images = image_service.detail(context, filters=filters) - LOG.debug(_("Found %d images (rotation: %d)" % - (len(images), rotation))) - if len(images) > rotation: + num_images = len(images) + LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)" + % locals())) + if num_images > rotation: # Sort oldest (by created_at) to end of list images.sort(key=itemgetter('created_at'), reverse=True) -- cgit From 594d5c7a98f2b4e6ea2d866f10c67cbdaa88ce0c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 24 Jun 2011 15:03:01 -0500 Subject: Refactored backup rotate. 
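The 'Refactored backup rotate' change below splits the old image_type values into image_type ('snapshot' | 'backup') plus a separate backup_type ('daily' | 'weekly'), and those are exactly the properties rotate_backups() filters on afterwards. A small illustration of the glance metadata each path produces (values are made up; the user_id and image_state properties set by _create_image are omitted for brevity):

# Metadata recorded for a snapshot vs. a daily backup after the refactor
# below (illustrative values only).
snapshot_meta = {'name': 'my-snap',
                 'properties': {'instance_uuid': '1b2c3d4e-placeholder',
                                'image_type': 'snapshot',
                                'backup_type': None}}

daily_backup_meta = {'name': 'nightly',
                     'properties': {'instance_uuid': '1b2c3d4e-placeholder',
                                    'image_type': 'backup',
                                    'backup_type': 'daily'}}

# Filters rotate_backups() uses to find rotation candidates; they match
# only the second kind of image, so snapshots are never rotated away.
filters = {'property-image_type': 'backup',
           'property-backup_type': 'daily',
           'property-instance_uuid': '1b2c3d4e-placeholder'}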
--- nova/compute/api.py | 33 ++++++++++++++++++++------------- nova/compute/manager.py | 29 ++++++++++++++++++++--------- 2 files changed, 40 insertions(+), 22 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index c0cb2e18a..9c6f0ef9d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -701,32 +701,38 @@ class API(base.Base): raise exception.Error(_("Unable to find host for Instance %s") % instance_id) - def backup(self, context, instance_id, backup_type, rotation): + def backup(self, context, instance_id, name, backup_type, rotation): """Backup the given instance - instance_id - int - id representing the instance - backup_type - str - whether it's 'daily' or 'weekly' - rotation - int - number of backups to keep around - """ + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: name of the backup or snapshot name = backup_type # daily backups are called 'daily' - recv_meta = self._snapshot(context, instance_id, name, backup_type, - rotation=rotation) + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + """ + recv_meta = self._create_image(context, instance_id, name, 'backup', + backup_type=backup_type, rotation=rotation) return recv_meta def snapshot(self, context, instance_id, name): """Snapshot the given instance. + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: name of the backup or snapshot + :returns: A dict containing image metadata """ - return self._snapshot(context, instance_id, name, 'snapshot') + return self._create_image(context, instance_id, name, 'snapshot') - def _snapshot(self, context, instance_id, name, image_type, rotation=None): - """Snapshot an instance on this host. + def _create_image(self, context, instance_id, name, image_type, + backup_type=None, rotation=None): + """Create snapshot or backup for an instance on this host. :param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param name: string for name of the snapshot - :param image_type: snapshot | daily | weekly + :param image_type: snapshot | backup + :param backup_type: daily | weekly :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ @@ -734,12 +740,13 @@ class API(base.Base): properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), 'image_state': 'creating', - 'image_type': image_type} + 'image_type': image_type, + 'backup_type': backup_type} sent_meta = {'name': name, 'is_public': False, 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id'], 'image_type': image_type, - 'rotation': rotation} + 'backup_type': backup_type, 'rotation': rotation} self._cast_compute_message('snapshot_instance', context, instance_id, params=params) return recv_meta diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ca66d0387..1458ea41f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -476,13 +476,15 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def snapshot_instance(self, context, instance_id, image_id, - image_type='snapshot', rotation=None): + image_type='snapshot', backup_type=None, + rotation=None): """Snapshot an instance on this host. 
:param context: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param image_id: glance.db.sqlalchemy.models.Image.Id - :param image_type: snapshot | daily | weekly + :param image_type: snapshot | backup + :param backup_type: daily | weekly :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ @@ -504,13 +506,21 @@ class ComputeManager(manager.SchedulerDependentManager): 'expected: %(running)s)') % locals()) self.driver.snapshot(instance_ref, image_id) - if rotation and image_type == 'snapshot': + + if image_type == 'snapshot' and rotation: raise exception.ImageRotationNotAllowed - elif rotation: - instance_uuid = instance_ref['uuid'] - self.rotate_backups(context, instance_uuid, image_type, rotation) + elif image_type == 'backup': + if rotation: + instance_uuid = instance_ref['uuid'] + self.rotate_backups(context, instance_uuid, backup_type, + rotation) + else: + raise exception.RotationRequiredForBackup + else: + raise Exception(_('Image type not recognized %s') % image_type) + - def rotate_backups(self, context, instance_uuid, image_type, rotation): + def rotate_backups(self, context, instance_uuid, backup_type, rotation): """Delete excess backups associated to an instance. Instances are allowed a fixed number of backups (the rotation number); @@ -519,12 +529,13 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: security context :param instance_uuid: string representing uuid of instance - :param image_type: snapshot | daily | weekly + :param backup_type: daily | weekly :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ image_service = nova.image.get_default_image_service() - filters = {'property-image_type': image_type, + filters = {'property-image_type': 'backup', + 'property-backup_type': backup_type, 'property-instance_uuid': instance_uuid} images = image_service.detail(context, filters=filters) num_images = len(images) -- cgit From a1b9aea9d12eaa32f869e5a4a59b01788e6c836d Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 24 Jun 2011 15:04:34 -0500 Subject: PEP8 cleanup. --- nova/compute/api.py | 2 +- nova/compute/manager.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 9c6f0ef9d..efd6d166b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -719,7 +719,7 @@ class API(base.Base): :param instance_id: nova.db.sqlalchemy.models.Instance.Id :param name: name of the backup or snapshot - + :returns: A dict containing image metadata """ return self._create_image(context, instance_id, name, 'snapshot') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1458ea41f..d4e1d3a1e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -506,7 +506,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'expected: %(running)s)') % locals()) self.driver.snapshot(instance_ref, image_id) - + if image_type == 'snapshot' and rotation: raise exception.ImageRotationNotAllowed elif image_type == 'backup': @@ -519,7 +519,6 @@ class ComputeManager(manager.SchedulerDependentManager): else: raise Exception(_('Image type not recognized %s') % image_type) - def rotate_backups(self, context, instance_uuid, backup_type, rotation): """Delete excess backups associated to an instance. 
-- cgit From 3b85d8080ee06436873bd2e4d8f358e4686da1bf Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 24 Jun 2011 15:18:05 -0500 Subject: Fixed snapshot logic. --- nova/compute/manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d4e1d3a1e..8708768fb 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -507,8 +507,9 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.snapshot(instance_ref, image_id) - if image_type == 'snapshot' and rotation: - raise exception.ImageRotationNotAllowed + if image_type == 'snapshot': + if rotation: + raise exception.ImageRotationNotAllowed elif image_type == 'backup': if rotation: instance_uuid = instance_ref['uuid'] -- cgit From 883992df19441544deb9aa5f60f2a77ab1f46567 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 27 Jun 2011 16:50:17 -0500 Subject: Review feedback. --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 9d71ff922..156b197e1 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -523,14 +523,14 @@ class ComputeManager(manager.SchedulerDependentManager): if image_type == 'snapshot': if rotation: - raise exception.ImageRotationNotAllowed + raise exception.ImageRotationNotAllowed() elif image_type == 'backup': if rotation: instance_uuid = instance_ref['uuid'] self.rotate_backups(context, instance_uuid, backup_type, rotation) else: - raise exception.RotationRequiredForBackup + raise exception.RotationRequiredForBackup() else: raise Exception(_('Image type not recognized %s') % image_type) -- cgit From 4b8bcf30f934ea91290b7fe41536ba06ee832b3f Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 28 Jun 2011 08:57:05 +0000 Subject: Re-merging code for generating system-usages to get around bzr merge braindeadness. --- nova/compute/manager.py | 94 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 923feaa59..d0e9cdf95 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -50,10 +50,12 @@ from nova import flags from nova import log as logging from nova import manager from nova import network +from nova import notifier from nova import rpc from nova import utils from nova import volume from nova.compute import power_state +from nova.notifier import api as notifier_api from nova.virt import driver @@ -275,6 +277,21 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_launched_at(context, instance_id) self._update_state(context, instance_id) + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['id'], + instance_type=instance_ref['instance_type']['name'], + instance_type_id=instance_ref['instance_type_id'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + launched_at=str(instance_ref['launched_at']) \ + if instance_ref['launched_at'] else '', + image_id=instance_ref['image_id']) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.create', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -327,6 +344,21 @@ class ComputeManager(manager.SchedulerDependentManager): # TODO(ja): should we keep it in a terminated state for a bit? 
self.db.instance_destroy(context, instance_id) + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['id'], + instance_type=instance_ref['instance_type']['name'], + instance_type_id=instance_ref['instance_type_id'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + launched_at=str(instance_ref['launched_at']) \ + if instance_ref['launched_at'] else '', + image_id=instance_ref['image_id']) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.delete', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -354,6 +386,21 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_image_id(context, instance_id, image_id) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['id'], + instance_type=instance_ref['instance_type']['name'], + instance_type_id=instance_ref['instance_type_id'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + launched_at=str(instance_ref['launched_at']) \ + if instance_ref['launched_at'] else '', + image_id=image_id) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.rebuild', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -501,6 +548,21 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) self.driver.destroy(instance_ref) + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['id'], + instance_type=instance_ref['instance_type']['name'], + instance_type_id=instance_ref['instance_type_id'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + launched_at=str(instance_ref['launched_at']) \ + if instance_ref['launched_at'] else '', + image_id=instance_ref['image_id']) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.resize.confirm', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -548,6 +610,21 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.revert_resize(instance_ref) self.db.migration_update(context, migration_id, {'status': 'reverted'}) + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['id'], + instance_type=instance_type['name'], + instance_type_id=instance_type['id'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + launched_at=str(instance_ref['launched_at']) \ + if instance_ref['launched_at'] else '', + image_id=instance_ref['image_id']) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.resize.revert', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -584,6 +661,23 @@ class ComputeManager(manager.SchedulerDependentManager): 'migration_id': migration_ref['id'], 'instance_id': instance_id, }, }) + usage_info = dict( + tenant_id=instance_ref['project_id'], + user_id=instance_ref['user_id'], + instance_id=instance_ref['id'], + instance_type=instance_ref['instance_type']['name'], + instance_type_id=instance_ref['instance_type_id'], + new_instance_type=instance_type['name'], + 
new_instance_type_id=instance_type['id'], + display_name=instance_ref['display_name'], + created_at=str(instance_ref['created_at']), + launched_at=str(instance_ref['launched_at']) \ + if instance_ref['launched_at'] else '', + image_id=instance_ref['image_id']) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.resize.prep', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock -- cgit From 4c1d05d27f207e71546f20c4e603839afc232b5a Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 28 Jun 2011 15:21:08 +0000 Subject: Fix issues due to renming of imange_id attrib. --- nova/compute/manager.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d081937bd..2c4f500f0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -355,7 +355,7 @@ class ComputeManager(manager.SchedulerDependentManager): created_at=str(instance_ref['created_at']), launched_at=str(instance_ref['launched_at']) \ if instance_ref['launched_at'] else '', - image_id=instance_ref['image_id']) + image_ref=instance_ref['image_ref']) notifier_api.notify('compute.%s' % self.host, 'compute.instance.create', notifier_api.INFO, @@ -451,7 +451,7 @@ class ComputeManager(manager.SchedulerDependentManager): created_at=str(instance_ref['created_at']), launched_at=str(instance_ref['launched_at']) \ if instance_ref['launched_at'] else '', - image_id=instance_ref['image_id']) + image_ref=instance_ref['image_ref']) notifier_api.notify('compute.%s' % self.host, 'compute.instance.delete', notifier_api.INFO, @@ -502,7 +502,7 @@ class ComputeManager(manager.SchedulerDependentManager): created_at=str(instance_ref['created_at']), launched_at=str(instance_ref['launched_at']) \ if instance_ref['launched_at'] else '', - image_id=image_id) + image_ref=image_ref) notifier_api.notify('compute.%s' % self.host, 'compute.instance.rebuild', notifier_api.INFO, @@ -694,7 +694,7 @@ class ComputeManager(manager.SchedulerDependentManager): created_at=str(instance_ref['created_at']), launched_at=str(instance_ref['launched_at']) \ if instance_ref['launched_at'] else '', - image_id=instance_ref['image_id']) + image_ref=instance_ref['image_ref']) notifier_api.notify('compute.%s' % self.host, 'compute.instance.resize.confirm', notifier_api.INFO, @@ -756,7 +756,7 @@ class ComputeManager(manager.SchedulerDependentManager): created_at=str(instance_ref['created_at']), launched_at=str(instance_ref['launched_at']) \ if instance_ref['launched_at'] else '', - image_id=instance_ref['image_id']) + image_ref=instance_ref['image_ref']) notifier_api.notify('compute.%s' % self.host, 'compute.instance.resize.revert', notifier_api.INFO, @@ -809,7 +809,7 @@ class ComputeManager(manager.SchedulerDependentManager): created_at=str(instance_ref['created_at']), launched_at=str(instance_ref['launched_at']) \ if instance_ref['launched_at'] else '', - image_id=instance_ref['image_id']) + image_ref=instance_ref['image_ref']) notifier_api.notify('compute.%s' % self.host, 'compute.instance.resize.prep', notifier_api.INFO, -- cgit From 66b2fef4b294c7a351cc5815632da520c6ee811b Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 28 Jun 2011 15:50:07 +0000 Subject: Fix yet more merge-skew. 
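The usage_info dicts above are repeated nearly verbatim at every notification site; a few commits further on ('Refactored usage generation') they collapse into a single utils.usage_from_instance(instance_ref, **kw) call. That helper lives outside nova/compute, so its body never appears in these diffs; judging only from the call sites, a plausible shape is:

def usage_from_instance(instance_ref, **kw):
    """Payload carried by the compute.instance.* notifications."""
    usage_info = dict(
        tenant_id=instance_ref['project_id'],
        user_id=instance_ref['user_id'],
        instance_id=instance_ref['id'],
        instance_type=instance_ref['instance_type']['name'],
        instance_type_id=instance_ref['instance_type_id'],
        display_name=instance_ref['display_name'],
        created_at=str(instance_ref['created_at']),
        launched_at=str(instance_ref['launched_at'])
                    if instance_ref['launched_at'] else '',
        image_ref=instance_ref['image_ref'])
    # Call sites override or add fields, e.g. image_ref=... on rebuild or
    # new_instance_type=... / new_instance_type_id=... on resize.prep.
    usage_info.update(kw)
    return usage_info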
--- nova/compute/manager.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2c4f500f0..ea5734ebd 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -441,6 +441,8 @@ class ComputeManager(manager.SchedulerDependentManager): # TODO(ja): should we keep it in a terminated state for a bit? self.db.instance_destroy(context, instance_id) + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) usage_info = dict( tenant_id=instance_ref['project_id'], user_id=instance_ref['user_id'], -- cgit From c69fc237f3628d579a35af1f7bf3fbb4adeb81b7 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 28 Jun 2011 15:59:44 +0000 Subject: Fix thinko in previous fix :P --- nova/compute/manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ea5734ebd..0a0ebd768 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -438,11 +438,10 @@ class ComputeManager(manager.SchedulerDependentManager): def terminate_instance(self, context, instance_id): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') + instance_ref = self.db.instance_get(context.elevated(), instance_id) # TODO(ja): should we keep it in a terminated state for a bit? self.db.instance_destroy(context, instance_id) - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) usage_info = dict( tenant_id=instance_ref['project_id'], user_id=instance_ref['user_id'], -- cgit From ec574986212b694bfed8109545b4b4dc578ec8f4 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Jun 2011 14:49:40 -0500 Subject: Review feedback. --- nova/compute/manager.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 156b197e1..fc9a89379 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -547,15 +547,30 @@ class ComputeManager(manager.SchedulerDependentManager): :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ + # NOTE(jk0): Eventually extract this out to the ImageService? + def fetch_images(): + images = [] + offset = 0 + while True: + batch = image_service.detail(context, filters=filters, + offset=offset) + if not batch: + break + images += batch + offset += len(batch) + return images + image_service = nova.image.get_default_image_service() filters = {'property-image_type': 'backup', 'property-backup_type': backup_type, 'property-instance_uuid': instance_uuid} - images = image_service.detail(context, filters=filters) + + images = fetch_images() num_images = len(images) LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)" % locals())) if num_images > rotation: + # TODO(jk0): Use db-level sorting in glance when it hits trunk. # Sort oldest (by created_at) to end of list images.sort(key=itemgetter('created_at'), reverse=True) -- cgit From ee2eb1f712a87e73832618be6b79f74301d74a41 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Jun 2011 14:58:34 -0500 Subject: Whoops. 
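The fetch_images() helper above drains the image listing page by page, but glance's detail() call pages with a marker (the id of the last image returned), not a numeric offset, which is what the next two small diffs converge on. A standalone sketch of that loop, where list_page(marker) stands in for image_service.detail(context, filters=filters, marker=marker):

def fetch_all(list_page):
    """Drain a marker-paginated listing into one list."""
    images, marker = [], None
    while True:
        batch = list_page(marker)
        if not batch:
            break
        images += batch
        marker = batch[-1]['id']   # resume after the last item seen
    return images

# Tiny in-memory stand-in for the paginated service:
data = [{'id': i} for i in range(5)]

def list_page(marker, page_size=2):
    start = 0 if marker is None else [d['id'] for d in data].index(marker) + 1
    return data[start:start + page_size]

assert [d['id'] for d in fetch_all(list_page)] == [0, 1, 2, 3, 4]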
--- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index fc9a89379..6a7bb73cb 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -550,14 +550,14 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(jk0): Eventually extract this out to the ImageService? def fetch_images(): images = [] - offset = 0 + marker = 0 while True: batch = image_service.detail(context, filters=filters, - offset=offset) + marker=marker) if not batch: break images += batch - offset += len(batch) + marker += len(batch) return images image_service = nova.image.get_default_image_service() -- cgit From ec1afee8399818db2ba11952a61c924da73f57a0 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Jun 2011 15:17:23 -0500 Subject: OOPS --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6a7bb73cb..fdb231e9e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -550,14 +550,14 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(jk0): Eventually extract this out to the ImageService? def fetch_images(): images = [] - marker = 0 + marker = None while True: batch = image_service.detail(context, filters=filters, marker=marker) if not batch: break images += batch - marker += len(batch) + marker = batch[-1]['id'] return images image_service = nova.image.get_default_image_service() -- cgit From 498f2d671573fc19d551516f7ead5da8d052ee18 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 28 Jun 2011 20:37:05 +0000 Subject: Refactored usage generation --- nova/compute/manager.py | 77 ++++++------------------------------------------- 1 file changed, 9 insertions(+), 68 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0a0ebd768..98e02f5b2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -345,17 +345,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_launched_at(context, instance_id) self._update_state(context, instance_id) - usage_info = dict( - tenant_id=instance_ref['project_id'], - user_id=instance_ref['user_id'], - instance_id=instance_ref['id'], - instance_type=instance_ref['instance_type']['name'], - instance_type_id=instance_ref['instance_type_id'], - display_name=instance_ref['display_name'], - created_at=str(instance_ref['created_at']), - launched_at=str(instance_ref['launched_at']) \ - if instance_ref['launched_at'] else '', - image_ref=instance_ref['image_ref']) + usage_info = utils.usage_from_instance(instance_ref) notifier_api.notify('compute.%s' % self.host, 'compute.instance.create', notifier_api.INFO, @@ -442,17 +432,7 @@ class ComputeManager(manager.SchedulerDependentManager): # TODO(ja): should we keep it in a terminated state for a bit? 
self.db.instance_destroy(context, instance_id) - usage_info = dict( - tenant_id=instance_ref['project_id'], - user_id=instance_ref['user_id'], - instance_id=instance_ref['id'], - instance_type=instance_ref['instance_type']['name'], - instance_type_id=instance_ref['instance_type_id'], - display_name=instance_ref['display_name'], - created_at=str(instance_ref['created_at']), - launched_at=str(instance_ref['launched_at']) \ - if instance_ref['launched_at'] else '', - image_ref=instance_ref['image_ref']) + usage_info = utils.usage_from_instance(instance_ref) notifier_api.notify('compute.%s' % self.host, 'compute.instance.delete', notifier_api.INFO, @@ -493,17 +473,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) - usage_info = dict( - tenant_id=instance_ref['project_id'], - user_id=instance_ref['user_id'], - instance_id=instance_ref['id'], - instance_type=instance_ref['instance_type']['name'], - instance_type_id=instance_ref['instance_type_id'], - display_name=instance_ref['display_name'], - created_at=str(instance_ref['created_at']), - launched_at=str(instance_ref['launched_at']) \ - if instance_ref['launched_at'] else '', - image_ref=image_ref) + usage_info = utils.usage_from_instance(instance_ref, + image_ref=image_ref) notifier_api.notify('compute.%s' % self.host, 'compute.instance.rebuild', notifier_api.INFO, @@ -685,17 +656,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) self.driver.destroy(instance_ref) - usage_info = dict( - tenant_id=instance_ref['project_id'], - user_id=instance_ref['user_id'], - instance_id=instance_ref['id'], - instance_type=instance_ref['instance_type']['name'], - instance_type_id=instance_ref['instance_type_id'], - display_name=instance_ref['display_name'], - created_at=str(instance_ref['created_at']), - launched_at=str(instance_ref['launched_at']) \ - if instance_ref['launched_at'] else '', - image_ref=instance_ref['image_ref']) + usage_info = utils.usage_from_instance(instance_ref) notifier_api.notify('compute.%s' % self.host, 'compute.instance.resize.confirm', notifier_api.INFO, @@ -747,17 +708,7 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.revert_resize(instance_ref) self.db.migration_update(context, migration_id, {'status': 'reverted'}) - usage_info = dict( - tenant_id=instance_ref['project_id'], - user_id=instance_ref['user_id'], - instance_id=instance_ref['id'], - instance_type=instance_type['name'], - instance_type_id=instance_type['id'], - display_name=instance_ref['display_name'], - created_at=str(instance_ref['created_at']), - launched_at=str(instance_ref['launched_at']) \ - if instance_ref['launched_at'] else '', - image_ref=instance_ref['image_ref']) + usage_info = utils.usage_from_instance(instance_ref) notifier_api.notify('compute.%s' % self.host, 'compute.instance.resize.revert', notifier_api.INFO, @@ -798,19 +749,9 @@ class ComputeManager(manager.SchedulerDependentManager): 'migration_id': migration_ref['id'], 'instance_id': instance_id, }, }) - usage_info = dict( - tenant_id=instance_ref['project_id'], - user_id=instance_ref['user_id'], - instance_id=instance_ref['id'], - instance_type=instance_ref['instance_type']['name'], - instance_type_id=instance_ref['instance_type_id'], - new_instance_type=instance_type['name'], - new_instance_type_id=instance_type['id'], - 
display_name=instance_ref['display_name'], - created_at=str(instance_ref['created_at']), - launched_at=str(instance_ref['launched_at']) \ - if instance_ref['launched_at'] else '', - image_ref=instance_ref['image_ref']) + usage_info = utils.usage_from_instance(instance_ref, + new_instance_type=instance_type['name'], + new_instance_type_id=instance_type['id']) notifier_api.notify('compute.%s' % self.host, 'compute.instance.resize.prep', notifier_api.INFO, -- cgit From d0ff8a737111e9155fd59816afa5c4fc2b34bb4c Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Jun 2011 16:54:25 -0500 Subject: Let glance handle sorting. --- nova/compute/manager.py | 5 ----- 1 file changed, 5 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index fdb231e9e..f81e793fe 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -43,7 +43,6 @@ import time import functools from eventlet import greenthread -from operator import itemgetter from nova import exception from nova import flags @@ -570,10 +569,6 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)" % locals())) if num_images > rotation: - # TODO(jk0): Use db-level sorting in glance when it hits trunk. - # Sort oldest (by created_at) to end of list - images.sort(key=itemgetter('created_at'), reverse=True) - # NOTE(sirp): this deletes all backups that exceed the rotation # limit excess = len(images) - rotation -- cgit From 0ca902cb90ea824ef199601b65dbc52e6c713079 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Tue, 28 Jun 2011 18:50:17 -0500 Subject: Review feedback --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 404a2176b..40a640083 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -571,7 +571,7 @@ class ComputeManager(manager.SchedulerDependentManager): marker = None while True: batch = image_service.detail(context, filters=filters, - marker=marker) + marker=marker, sort_key='created_at', sort_dir='desc') if not batch: break images += batch -- cgit From d1adc2d969570049921370450e942e349deed840 Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 29 Jun 2011 13:17:16 +0400 Subject: Remove unnessesary (and possibly failing) encoding. --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 844192404..16ae07272 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -65,7 +65,7 @@ def generate_default_hostname(instance): else: table += '\0' deletions += c - return display_name.encode('latin-1', 'ignore').translate(table, deletions) + return display_name.translate(table, deletions) class API(base.Base): -- cgit From c8d27dd68d449df77106c9cdf45b63c25fcb18ca Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 29 Jun 2011 14:07:59 +0400 Subject: Brought back that encode under condition. 
--- nova/compute/api.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 16ae07272..aa62e72cd 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -65,6 +65,8 @@ def generate_default_hostname(instance): else: table += '\0' deletions += c + if isinstance(display_name, unicode): + display_name = display_name.encode('latin-1', 'ignore') return display_name.translate(table, deletions) -- cgit From 291df3a09a9970ad9ab0b236c93afe4d2a46920e Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 29 Jun 2011 09:29:07 -0700 Subject: removed extra stubout, switched to isinstance and catching explicit exception --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index b8c76c2f9..39ba06380 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -260,7 +260,7 @@ class API(base.Base): elevated = context.elevated() if security_group is None: security_group = ['default'] - if not type(security_group) is list: + if not isinstance(security_group, list): security_group = [security_group] security_groups = [] -- cgit From 851802e772095b646a7570bf0cc0c6d32be4643c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 29 Jun 2011 12:23:26 -0700 Subject: Fixed indentation issues Fixed min/max_count checking issues Fixed a wrongly log message when zone aware scheduler finds no suitable hosts --- nova/compute/api.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 1092ec727..92b87e75c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -657,12 +657,13 @@ class API(base.Base): return instances admin_context = context.elevated() - children = scheduler_api.call_zone_method(admin_context, "list", - novaclient_collection_name="servers", - reservation_id=reservation_id, - project_id=project_id, - fixed_ip=fixed_ip, - recurse_zones=True) + children = scheduler_api.call_zone_method(admin_context, + "list", + novaclient_collection_name="servers", + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=True) for zone, servers in children: for server in servers: -- cgit From 7623b91391e9c03beb81f30563e40e71bb94313b Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Jun 2011 17:06:58 -0500 Subject: changes a few instance refs --- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 2f580939e..67fe1921f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -318,7 +318,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_launched_at(context, instance_id) self._update_state(context, instance_id) - usage_info = utils.usage_from_instance(instance_ref) + usage_info = utils.usage_from_instance(instance) notifier_api.notify('compute.%s' % self.host, 'compute.instance.create', notifier_api.INFO, @@ -372,11 +372,11 @@ class ComputeManager(manager.SchedulerDependentManager): def terminate_instance(self, context, instance_id): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') - instance_ref = self.db.instance_get(context.elevated(), instance_id) + instance = self.db.instance_get(context.elevated(), instance_id) # TODO(ja): should we keep it in a terminated state for a 
bit? self.db.instance_destroy(context, instance_id) - usage_info = utils.usage_from_instance(instance_ref) + usage_info = utils.usage_from_instance(instance) notifier_api.notify('compute.%s' % self.host, 'compute.instance.delete', notifier_api.INFO, -- cgit From e06542ed504847efbff8c59905c75ef99c512ecc Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Thu, 30 Jun 2011 11:02:57 -0700 Subject: blah --- nova/compute/manager.py | 1 - 1 file changed, 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 98e02f5b2..af1226d1a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -49,7 +49,6 @@ from nova import flags from nova import log as logging from nova import manager from nova import network -from nova import notifier from nova import rpc from nova import utils from nova import volume -- cgit From 7ca20797496947c0bdd60e77b4962fd360e01f55 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 1 Jul 2011 13:44:12 +0000 Subject: after trunk merge --- nova/compute/api.py | 10 ++++++++++ nova/compute/manager.py | 10 ++++++++++ 2 files changed, 20 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 28459dc75..4dba6cf1f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -912,6 +912,16 @@ class API(base.Base): """Unpause the given instance.""" self._cast_compute_message('unpause_instance', context, instance_id) + def disable_host(self, context, instance_id=None, host=None): + """Sets the specified to not receive new instances.""" + return self._call_compute_message("disable_host", context, + instance_id=None, host=host) + + def enable_host(self, context, instance_id=None, host=None): + """Sets the specified to receive new instances.""" + return self._call_compute_message("enable_host", context, + instance_id=None, host=host) + @scheduler_api.reroute_compute("diagnostics") def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index bbbddde0a..152b2670c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -874,6 +874,16 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, result)) + @exception.wrap_exception + def disable_host(self, context, instance_id=None, host=None): + """Set a host so that it can not accept new instances.""" + return self.driver.disable_host(host) + + @exception.wrap_exception + def enable_host(self, context, instance_id=None, host=None): + """Set a host so that it can accept new instances.""" + return self.driver.enable_host(host) + @exception.wrap_exception def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for an instance on this host.""" -- cgit From fb6aba61ef03032c31196bd58c68fa7b7d4c2769 Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 1 Jul 2011 14:26:05 +0000 Subject: completed api changes. 
still need plugin changes --- nova/compute/api.py | 13 ++++--------- nova/compute/manager.py | 12 ++++-------- 2 files changed, 8 insertions(+), 17 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 4dba6cf1f..c99b64c50 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -912,15 +912,10 @@ class API(base.Base): """Unpause the given instance.""" self._cast_compute_message('unpause_instance', context, instance_id) - def disable_host(self, context, instance_id=None, host=None): - """Sets the specified to not receive new instances.""" - return self._call_compute_message("disable_host", context, - instance_id=None, host=host) - - def enable_host(self, context, instance_id=None, host=None): - """Sets the specified to receive new instances.""" - return self._call_compute_message("enable_host", context, - instance_id=None, host=host) + def set_host_enabled(self, context, host, enabled): + """Sets the specified host's ability to accept new instances.""" + return self._call_compute_message("set_host_enabled", context, + instance_id=None, host=host, enabled=enabled) @scheduler_api.reroute_compute("diagnostics") def get_diagnostics(self, context, instance_id): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 152b2670c..91a604934 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -875,14 +875,10 @@ class ComputeManager(manager.SchedulerDependentManager): result)) @exception.wrap_exception - def disable_host(self, context, instance_id=None, host=None): - """Set a host so that it can not accept new instances.""" - return self.driver.disable_host(host) - - @exception.wrap_exception - def enable_host(self, context, instance_id=None, host=None): - """Set a host so that it can accept new instances.""" - return self.driver.enable_host(host) + def set_host_enabled(self, context, instance_id=None, host=None, + enabled=None): + """Sets the specified host's ability to accept new instances.""" + return self.driver.set_host_enabled(host, enabled) @exception.wrap_exception def get_diagnostics(self, context, instance_id): -- cgit From 42dabbc86e3af49215ced275d76d241b4daf8bdc Mon Sep 17 00:00:00 2001 From: Ed Leafe Date: Fri, 1 Jul 2011 19:44:10 +0000 Subject: Updated unit tests --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index c99b64c50..b0eedcd64 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -915,7 +915,7 @@ class API(base.Base): def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._call_compute_message("set_host_enabled", context, - instance_id=None, host=host, enabled=enabled) + instance_id=None, host=host, params={"enabled": enabled}) @scheduler_api.reroute_compute("diagnostics") def get_diagnostics(self, context, instance_id): -- cgit
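Taken together, the last few diffs thread one boolean from the admin API down to the virt driver: API.set_host_enabled() issues a synchronous _call_compute_message('set_host_enabled', ...) with params={'enabled': enabled}, and ComputeManager.set_host_enabled() forwards it to self.driver.set_host_enabled(host, enabled). A hypothetical caller, such as the admin extension these patches anticipate ('still need plugin changes'), would look roughly like this; 'compute-01' and admin_ctx are placeholders, and the default API() constructor arguments are assumed:

from nova.compute.api import API

compute_api = API()

# Take the host out of rotation; this blocks until the compute service on
# 'compute-01' has asked its driver to disable the host.
compute_api.set_host_enabled(admin_ctx, host='compute-01', enabled=False)

# ... perform maintenance ...

# Put the host back into service.
compute_api.set_host_enabled(admin_ctx, host='compute-01', enabled=True)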