From 77d7e022fd5f2c8709a6784cc83429494d126a3b Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 9 Dec 2010 13:59:50 -0800 Subject: Converted the instance table to use a uuid instead of an auto_increment ID and a random internal_id. I had to use a String(32) column with hex and not a String(16) with bytes because SQLAlchemy doesn't like non-unicode strings going in for String types. We could try another type, but I didn't want a primary_key on blob types. --- nova/compute/api.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..f310c575f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,9 +36,9 @@ from nova.db import base FLAGS = flags.FLAGS -def generate_default_hostname(internal_id): +def generate_default_hostname(instance_id): """Default function to generate a hostname given an instance reference.""" - return str(internal_id) + return str(instance_id) class ComputeAPI(base.Base): @@ -127,7 +127,6 @@ class ComputeAPI(base.Base): **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] - internal_id = instance['internal_id'] elevated = context.elevated() if not security_groups: @@ -138,9 +137,9 @@ class ComputeAPI(base.Base): security_group_id) # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(internal_id)) + updates = dict(hostname=generate_hostname(instance_id)) if 'display_name' not in instance: - updates['display_name'] = "Server %s" % internal_id + updates['display_name'] = "Server %s" % instance_id instance = self.update_instance(context, instance_id, **updates) instances.append(instance) @@ -199,17 +198,16 @@ class ComputeAPI(base.Base): return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %d" % instance_id) + logging.debug("Going to try and terminate %s" % instance_id) try: - instance = self.db.instance_get_by_internal_id(context, - instance_id) + instance = self.db.instance_get_by_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found during terminate", + logging.warning("Instance %s was not found during terminate", instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning("Instance %d is already being terminated", + logging.warning("Instance %s is already being terminated", instance_id) return @@ -264,11 +262,11 @@ class ComputeAPI(base.Base): return self.db.instance_get_all(context) def get_instance(self, context, instance_id): - return self.db.instance_get_by_internal_id(context, instance_id) + return self.db.instance_get_by_id(context, instance_id) def reboot(self, context, instance_id): """Reboot the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -277,7 +275,7 @@ class ComputeAPI(base.Base): def rescue(self, context, instance_id): """Rescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -286,7 +284,7 @@ class ComputeAPI(base.Base): def unrescue(self, context,
instance_id): """Unrescue the given instance.""" - instance = self.db.instance_get_by_internal_id(context, instance_id) + instance = self.db.instance_get_by_id(context, instance_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), -- cgit From be9a3cd7e17edac4032c8ae554f75d725b0ad54a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 13 Dec 2010 16:42:35 +0100 Subject: Move security group refresh logic into ComputeAPI. Add a trigger_security_group_members_refresh to ComputeAPI which finds the hosts that have instances that have security groups that reference a security group in which a new instance has just been placed, and sends a refresh_security_group_members to each of them. --- nova/compute/api.py | 61 +++++++++++++++++++++++++++++++++++++++++++++++++ nova/compute/manager.py | 16 ++++++++++--- 2 files changed, 74 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 8e0efa4cc..27010d513 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -24,6 +24,7 @@ import datetime import logging import time +from nova import context from nova import db from nova import exception from nova import flags @@ -165,6 +166,10 @@ class ComputeAPI(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) + + for group_id in security_groups: + self.trigger_security_group_members_refresh(elevated, group_id) + return instances def ensure_default_security_group(self, context): @@ -184,6 +189,62 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) + + def trigger_security_group_rules_refresh(self, context, security_group_id): + """Called when a rule is added to or removed from a security_group""" + + security_group = db.security_group_get(context, security_group_id) + + hosts = set() + for instance in security_group['instances']: + if instance['host'] is not None: + hosts.add(instance['host']) + + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group", + "args": {"security_group_id": security_group.id}}) + + + def trigger_security_group_members_refresh(self, context, group_id): + """Called when a security group gains a new or loses a member + + Sends an update request to each compute node for whom this is + relevant.""" + + # First, we get the security group rules that reference this group as + # the grantee.. + security_group_rules = \ + db.security_group_rule_get_by_security_group_grantee(context, + group_id) + + # ..then we distill the security groups to which they belong.. + security_groups = set() + for rule in security_group_rules: + security_groups.add(rule['parent_group_id']) + + # ..then we find the instances that are members of these groups.. + instances = set() + for security_group in security_groups: + for instance in security_group['instances']: + instances.add(instance['id']) + + # ...then we find the hosts where they live... + hosts = set() + for instance in instances: + if instance['host']: + hosts.add(instance['host']) + + # ...and finally we tell these nodes to refresh their view of this + # particular security group. 
+ for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_members", + "args": {"security_group_id": group_id}}) + + def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore. diff --git a/nova/compute/manager.py b/nova/compute/manager.py index dd8d41129..ee449c819 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -80,9 +80,19 @@ class ComputeManager(manager.Manager): @defer.inlineCallbacks @exception.wrap_exception - def refresh_security_group(self, context, security_group_id, **_kwargs): - """This call passes stright through to the virtualization driver.""" - yield self.driver.refresh_security_group(security_group_id) + def refresh_security_group_rules(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + yield self.driver.refresh_security_group_rules(security_group_id) + + + @defer.inlineCallbacks + @exception.wrap_exception + def refresh_security_group_members(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + yield self.driver.refresh_security_group_members(security_group_id) + @defer.inlineCallbacks @exception.wrap_exception -- cgit From 1539df7429a235ba2fefe3f65422fe94b248ac08 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 14:03:19 +0100 Subject: refresh_security_group renamed to refresh_security_group_rules --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 27010d513..686c1eb0a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -203,7 +203,7 @@ class ComputeAPI(base.Base): for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "refresh_security_group", + {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) -- cgit From b420a3daa5f1b827f49e5d6557aaa0f8d396b81b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 15 Dec 2010 14:04:06 +0100 Subject: Lots of PEP-8 work. 
--- nova/compute/api.py | 8 ++------ nova/compute/manager.py | 2 -- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 686c1eb0a..7c91792e3 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -166,7 +166,6 @@ class ComputeAPI(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) - for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -189,7 +188,6 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) - def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group""" @@ -206,10 +204,9 @@ class ComputeAPI(base.Base): {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group.id}}) - def trigger_security_group_members_refresh(self, context, group_id): """Called when a security group gains a new or loses a member - + Sends an update request to each compute node for whom this is relevant.""" @@ -223,7 +220,7 @@ class ComputeAPI(base.Base): security_groups = set() for rule in security_group_rules: security_groups.add(rule['parent_group_id']) - + # ..then we find the instances that are members of these groups.. instances = set() for security_group in security_groups: @@ -244,7 +241,6 @@ class ComputeAPI(base.Base): {"method": "refresh_security_group_members", "args": {"security_group_id": group_id}}) - def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore. diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ee449c819..f039bca2e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -85,7 +85,6 @@ class ComputeManager(manager.Manager): """This call passes straight through to the virtualization driver.""" yield self.driver.refresh_security_group_rules(security_group_id) - @defer.inlineCallbacks @exception.wrap_exception def refresh_security_group_members(self, context, @@ -93,7 +92,6 @@ class ComputeManager(manager.Manager): """This call passes straight through to the virtualization driver.""" yield self.driver.refresh_security_group_members(security_group_id) - @defer.inlineCallbacks @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): -- cgit From 729468d0be1bf97c869b1169414154a76d9b96b2 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 21 Dec 2010 19:20:28 -0500 Subject: Burnin support by specifying a specific host via availability_zone for running instances and volumes on. 
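For context, a sketch of how this new argument might be invoked (a hypothetical call, not part of this patch: the instance type, image id, and zone value are made up, and the exact zone/host syntax the scheduler honors is not shown in this diff):

    # Hypothetical usage of the availability_zone plumbing added below;
    # assumes an existing ComputeAPI instance and request context.
    api = ComputeAPI()
    instances = api.create_instances(context,
                                     'm1.small',    # instance_type
                                     'ami-d0b430',  # image_id (made up)
                                     availability_zone='nova')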
--- nova/compute/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index c740814da..299083bf4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -57,6 +57,7 @@ class ComputeAPI(base.Base): max_count=1, kernel_id=None, ramdisk_id=None, display_name='', description='', key_name=None, key_data=None, security_group='default', + availability_zone=None, generate_hostname=generate_default_hostname): """Create the number of instances requested if quote and other arguments check out ok.""" @@ -121,7 +122,8 @@ class ComputeAPI(base.Base): 'display_name': display_name, 'display_description': description, 'key_name': key_name, - 'key_data': key_data} + 'key_data': key_data, + 'availability_zone': availability_zone} elevated = context.elevated() instances = [] -- cgit From 8aea573bd2e44e152fb4ef1627640bab1818dede Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Tue, 28 Dec 2010 23:55:58 -0600 Subject: initial lock functionality commit --- nova/compute/api.py | 35 ++++++++++++++++++++++++++++++++++- nova/compute/manager.py | 24 ++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index a47703461..361ab9914 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -141,7 +141,8 @@ class ComputeAPI(base.Base): 'display_description': description, 'user_data': user_data or '', 'key_name': key_name, - 'key_data': key_data} + 'key_data': key_data, + 'locked': False} elevated = context.elevated() instances = [] @@ -319,3 +320,35 @@ class ComputeAPI(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) + + def lock(self, context, instance_id): + """ + lock the instance with instance_id + + """ + instance = self.get_instance(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "lock_instance", + "args": {"instance_id": instance['id']}}) + + def unlock(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + instance = self.get_instance(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unlock_instance", + "args": {"instance_id": instance['id']}}) + + def get_lock(self, context, instance_id): + """ + return the boolean state of (instance with instance_id)'s lock + + """ + instance = self.get_instance(context, instance_id) + return instance['locked'] diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 70b175e7c..05f1e44a2 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -329,6 +329,30 @@ class ComputeManager(manager.Manager): instance_id, result)) + @exception.wrap_exception + def lock_instance(self, context, instance_id): + """ + lock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug(_('instance %s: locking'), instance_ref['internal_id']) + self.db.instance_set_lock(context, instance_id, True) + + @exception.wrap_exception + def unlock_instance(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + logging.debug(_('instance %s: unlocking'), 
instance_ref['internal_id']) + self.db.instance_set_lock(context, instance_id, False) + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" -- cgit From 64078137ce12ee52fff710f5a262d57b4ace2809 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 16:29:15 -0800 Subject: Moved ec2 volume operations into a volume API interface for other components to use. Added attach/detach as compute.api methods, since they operate in the context of instances (and to avoid a dependency loop). --- nova/compute/api.py | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 28af434e3..870fcdbe4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -30,6 +30,7 @@ from nova import flags from nova import quota from nova import rpc from nova import utils +from nova import volume from nova.compute import instance_types from nova.db import base @@ -44,13 +45,17 @@ def generate_default_hostname(instance_id): class ComputeAPI(base.Base): """API for interacting with the compute manager.""" - def __init__(self, network_manager=None, image_service=None, **kwargs): + def __init__(self, network_manager=None, image_service=None, + volume_api=None, **kwargs): if not network_manager: network_manager = utils.import_object(FLAGS.network_manager) self.network_manager = network_manager if not image_service: image_service = utils.import_object(FLAGS.image_service) self.image_service = image_service + if not volume_api: + volume_api = volume.API() + self.volume_api = volume_api super(ComputeAPI, self).__init__(**kwargs) def get_network_topic(self, context, instance_id): @@ -298,3 +303,30 @@ class ComputeAPI(base.Base): self.db.queue_get_for(context, FLAGS.compute_topic, host), {"method": "unrescue_instance", "args": {"instance_id": instance['id']}}) + + def attach_volume(self, context, instance_id, volume_id, device): + if not re.match("^/dev/[a-z]d[a-z]+$", device): + raise exception.ApiError(_("Invalid device specified: %s. " + "Example device: /dev/vdb") % device) + self.volume_api.check_attach(context, volume_id) + instance = self.get_instance(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + + def detach_volume(self, context, volume_id): + instance = self.db.volume_get_instance(context.elevated(), volume_id) + if not instance: + raise exception.ApiError(_("Volume isn't attached to anything!")) + self.volume_api.check_detach(context, volume_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "detach_volume", + "args": {"instance_id": instance['id'], + "volume_id": volume_id}}) + return instance -- cgit From 24e253a1feaa0a39e4095f447f62f7ea9b43c8bb Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 18:30:01 -0600 Subject: moved check lock decorator to compute api level. 
altered openstack.test_servers accordingly and wrote test for lock in tests.test_compute --- nova/compute/api.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 361ab9914..d720a8f2c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -23,6 +23,7 @@ Handles all API requests relating to instances (guest vms). import datetime import logging import time +import functools from nova import db from nova import exception @@ -36,6 +37,40 @@ from nova.db import base FLAGS = flags.FLAGS +def checks_instance_lock(function): + """ + decorator used for preventing action against locked instances + unless, of course, you happen to be admin + + """ + + @functools.wraps(function) + def decorated_function(*args, **kwargs): + + # grab args to function + try: + if 'context' in kwargs: + context = kwargs['context'] + else: + context = args[1] + if 'instance_id' in kwargs: + instance_id = kwargs['instance_id'] + else: + instance_id = args[2] + locked = ComputeAPI().get_lock(context, instance_id) + admin = context.is_admin + except: + logging.error(_("check_instance_lock: argument error: |%s|, |%s|"), + args, + kwargs) + # if admin or unlocked call function, otherwise 405 + if admin or not locked: + return function(*args, **kwargs) + raise Exception(_("Instance is locked, cannot execute |%s|"), function) + + return decorated_function + + def generate_default_hostname(internal_id): """Default function to generate a hostname given an instance reference.""" return str(internal_id) @@ -198,6 +233,7 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) + @checks_instance_lock def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore.
@@ -212,6 +248,7 @@ class ComputeAPI(base.Base): """ return self.db.instance_update(context, instance_id, kwargs) + @checks_instance_lock def delete_instance(self, context, instance_id): logging.debug("Going to try and terminate %d" % instance_id) try: @@ -258,6 +295,7 @@ class ComputeAPI(base.Base): def get_instance(self, context, instance_id): return self.db.instance_get_by_internal_id(context, instance_id) + @checks_instance_lock def reboot(self, context, instance_id): """Reboot the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -267,6 +305,7 @@ class ComputeAPI(base.Base): {"method": "reboot_instance", "args": {"instance_id": instance['id']}}) + @checks_instance_lock def pause(self, context, instance_id): """Pause the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -276,6 +315,7 @@ class ComputeAPI(base.Base): {"method": "pause_instance", "args": {"instance_id": instance['id']}}) + @checks_instance_lock def unpause(self, context, instance_id): """Unpause the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -285,6 +325,7 @@ class ComputeAPI(base.Base): {"method": "unpause_instance", "args": {"instance_id": instance['id']}}) + @checks_instance_lock def suspend(self, context, instance_id): """suspend the instance with instance_id""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -294,6 +335,7 @@ class ComputeAPI(base.Base): {"method": "suspend_instance", "args": {"instance_id": instance['id']}}) + @checks_instance_lock def resume(self, context, instance_id): """resume the instance with instance_id""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -303,6 +345,7 @@ class ComputeAPI(base.Base): {"method": "resume_instance", "args": {"instance_id": instance['id']}}) + @checks_instance_lock def rescue(self, context, instance_id): """Rescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -312,6 +355,7 @@ class ComputeAPI(base.Base): {"method": "rescue_instance", "args": {"instance_id": instance['id']}}) + @checks_instance_lock def unrescue(self, context, instance_id): """Unrescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) -- cgit From 74785bf8c070bf0760724b3412f4ee1bb05cf72b Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 18:34:49 -0600 Subject: fixed variables being out of scope in lock decorator --- nova/compute/api.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index d720a8f2c..073129c13 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -47,6 +47,10 @@ def checks_instance_lock(function): @functools.wraps(function) def decorated_function(*args, **kwargs): + # assume worst case (have to declare so they are in scope) + admin = False + locked = True + # grab args to function try: if 'context' in kwargs: @@ -60,7 +64,7 @@ def checks_instance_lock(function): instance_id = args[2] locked = ComputeAPI().get_lock(context, instance_id) admin = context.is_admin except: - logging.error(_("check_instance_lock: argument error: |%s|, |%s|"), + raise Exception(_("check_instance_lock argument error |%s|, |%s|"), args, kwargs) # if admin or unlocked call function, otherwise 405 -- cgit From d06f85c611adf244f2c757f023c92c2b6cad2e7c Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 18:40:03 -0600 Subject: altered error
exception/logging --- nova/compute/api.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 073129c13..19459c6d9 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -64,8 +64,7 @@ def checks_instance_lock(function): locked = ComputeAPI().get_lock(context, instance_id) admin = context.is_admin except: - raise Exception(_("check_instance_lock argument error |%s|, |%s|"), - args, + logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args, kwargs) # if admin or unlocked call function, otherwise 405 if admin or not locked: -- cgit From 837724193ece16310ff588a84d23891a75ced2f2 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 18:46:36 -0600 Subject: altered error exception/logging --- nova/compute/api.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 19459c6d9..6602f2534 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -63,9 +63,11 @@ def checks_instance_lock(function): instance_id = args[2] locked = ComputeAPI().get_lock(context, instance_id) admin = context.is_admin - except: + except Excetion as e: logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args, kwargs) + raise e + # if admin or unlocked call function, otherwise 405 if admin or not locked: return function(*args, **kwargs) -- cgit From 6f76367d2fefcec9b957352dd60e76c2cc3ba233 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 18:49:45 -0600 Subject: typo, trying to hurry.. look where that got me --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 6602f2534..232d1f26b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -63,7 +63,7 @@ def checks_instance_lock(function): instance_id = args[2] locked = ComputeAPI().get_lock(context, instance_id) admin = context.is_admin - except Excetion as e: + except Exception as e: logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args, kwargs) raise e -- cgit From b848f7459eb65ad365177d831783b3d63818f977 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 18:57:33 -0600 Subject: added some logging --- nova/compute/api.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 232d1f26b..0513ce95d 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -47,6 +47,9 @@ def checks_instance_lock(function): @functools.wraps(function) def decorated_function(*args, **kwargs): + logging.info(_("check_instance_locks decorating |%s|"), function) + logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args, + kwargs) # assume worst case (have to declare so they are in scope) admin = False locked = True -- cgit From 32b310f430c5db05c99de65a5bd400675770ef1d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 19:27:43 -0600 Subject: removed db.set_lock, using update_instance instead --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 05f1e44a2..9a33c7cac 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -339,7 +339,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) logging.debug(_('instance %s: locking'), 
instance_ref['internal_id']) - self.db.instance_set_lock(context, instance_id, True) + self.db.instance_update(context, instance_id, {'locked': True}) @exception.wrap_exception def unlock_instance(self, context, instance_id): @@ -351,7 +351,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) logging.debug(_('instance %s: unlocking'), instance_ref['internal_id']) - self.db.instance_set_lock(context, instance_id, False) + self.db.instance_update(context, instance_id, {'locked': False}) @exception.wrap_exception def get_console_output(self, context, instance_id): -- cgit From f1523f2fd19cde4ddbb046dc0362a0ac7d6b79e8 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 20:48:33 -0600 Subject: moved check lock decorator from the compute api to the compute manager... when it rains it pours --- nova/compute/api.py | 52 --------------------------------------- nova/compute/manager.py | 65 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 52 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 0513ce95d..361ab9914 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -23,7 +23,6 @@ Handles all API requests relating to instances (guest vms). import datetime import logging import time -import functools from nova import db from nova import exception @@ -37,48 +36,6 @@ from nova.db import base FLAGS = flags.FLAGS -def checks_instance_lock(function): - """ - decorator used for preventing action against locked instances - unless, of course, you happen to be admin - - """ - - @functools.wraps(function) - def decorated_function(*args, **kwargs): - - logging.info(_("check_instance_locks decorating |%s|"), function) - logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args, - kwargs) - # assume worst case (have to declare so they are in scope) - admin = False - locked = True - - # grab args to function - try: - if 'context' in kwargs: - context = kwargs['context'] - else: - context = args[1] - if 'instance_id' in kwargs: - instance_id = kwargs['instance_id'] - else: - instance_id = args[2] - locked = ComputeAPI().get_lock(context, instance_id) - admin = context.is_admin - except Exception as e: - logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args, - kwargs) - raise e - - # if admin or unlocked call function, otherwise 405 - if admin or not locked: - return function(*args, **kwargs) - raise Exception(_("Instance is locked, cannot execute |%s|"), function) - - return decorated_function - - def generate_default_hostname(internal_id): """Default function to generate a hostname given an instance reference.""" return str(internal_id) @@ -241,7 +198,6 @@ class ComputeAPI(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) - @checks_instance_lock def update_instance(self, context, instance_id, **kwargs): """Updates the instance in the datastore.
@@ -256,7 +212,6 @@ class ComputeAPI(base.Base): """ return self.db.instance_update(context, instance_id, kwargs) - @checks_instance_lock def delete_instance(self, context, instance_id): logging.debug("Going to try and terminate %d" % instance_id) try: @@ -303,7 +258,6 @@ class ComputeAPI(base.Base): def get_instance(self, context, instance_id): return self.db.instance_get_by_internal_id(context, instance_id) - @checks_instance_lock def reboot(self, context, instance_id): """Reboot the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -313,7 +267,6 @@ class ComputeAPI(base.Base): {"method": "reboot_instance", "args": {"instance_id": instance['id']}}) - @checks_instance_lock def pause(self, context, instance_id): """Pause the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -323,7 +276,6 @@ class ComputeAPI(base.Base): {"method": "pause_instance", "args": {"instance_id": instance['id']}}) - @checks_instance_lock def unpause(self, context, instance_id): """Unpause the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -333,7 +285,6 @@ class ComputeAPI(base.Base): {"method": "unpause_instance", "args": {"instance_id": instance['id']}}) - @checks_instance_lock def suspend(self, context, instance_id): """suspend the instance with instance_id""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -343,7 +294,6 @@ class ComputeAPI(base.Base): {"method": "suspend_instance", "args": {"instance_id": instance['id']}}) - @checks_instance_lock def resume(self, context, instance_id): """resume the instance with instance_id""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -353,7 +303,6 @@ class ComputeAPI(base.Base): {"method": "resume_instance", "args": {"instance_id": instance['id']}}) - @checks_instance_lock def rescue(self, context, instance_id): """Rescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) @@ -363,7 +312,6 @@ class ComputeAPI(base.Base): {"method": "rescue_instance", "args": {"instance_id": instance['id']}}) - @checks_instance_lock def unrescue(self, context, instance_id): """Unrescue the given instance.""" instance = self.db.instance_get_by_internal_id(context, instance_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 9a33c7cac..224159596 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -36,6 +36,7 @@ terminating it. 
import datetime import logging +import functools from nova import exception from nova import flags @@ -53,6 +54,48 @@ flags.DEFINE_string('stub_network', False, 'Stub network related code') +def checks_instance_lock(function): + """ + decorator used for preventing action against locked instances + unless, of course, you happen to be admin + + """ + + @functools.wraps(function) + def decorated_function(*args, **kwargs): + + logging.info(_("check_instance_locks decorating |%s|"), function) + logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args, + kwargs) + # assume worst case (have to declare so they are in scope) + admin = False + locked = True + + # grab args to function + try: + if 'context' in kwargs: + context = kwargs['context'] + else: + context = args[1] + if 'instance_id' in kwargs: + instance_id = kwargs['instance_id'] + else: + instance_id = args[2] + locked = args[0].get_locked(context, instance_id) + admin = context.is_admin + except Exception as e: + logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args, + kwargs) + raise e + + # if admin or unlocked call function, otherwise 405 + if admin or not locked: + return function(*args, **kwargs) + raise Exception(_("Instance is locked, cannot execute |%s|"), function) + + return decorated_function + + class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -158,6 +201,7 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() @@ -202,6 +246,7 @@ class ComputeManager(manager.Manager): self.db.instance_destroy(context, instance_id) @exception.wrap_exception + @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" context = context.elevated() @@ -225,6 +270,7 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this server.""" context = context.elevated() @@ -241,6 +287,7 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def unrescue_instance(self, context, instance_id): """Rescue an instance on this server.""" context = context.elevated() @@ -261,6 +308,7 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this server.""" context = context.elevated() @@ -279,6 +327,7 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this server.""" context = context.elevated() @@ -297,6 +346,7 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + @checks_instance_lock def suspend_instance(self, context, instance_id): """suspend the instance with instance_id""" context = context.elevated() @@ -314,6 +364,7 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + @checks_instance_lock def resume_instance(self, context, instance_id): """resume the suspended instance with instance_id""" context = context.elevated() @@ -353,6 +404,18 @@ class 
ComputeManager(manager.Manager): logging.debug(_('instance %s: unlocking'), instance_ref['internal_id']) self.db.instance_update(context, instance_id, {'locked': False}) + @exception.wrap_exception + def get_locked(self, context, instance_id): + """ + return the boolean state of (instance with instance_id)'s lock + + """ + context = context.elevated() + logging.debug(_('instance %s: getting locked'), + instance_ref['internal_id']) + instance_ref = self.db.instance_get(context, instance_id) + return instance_ref['locked'] + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" @@ -363,6 +426,7 @@ class ComputeManager(manager.Manager): return self.driver.get_console_output(instance_ref) @exception.wrap_exception + @checks_instance_lock def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() @@ -392,6 +456,7 @@ class ComputeManager(manager.Manager): return True @exception.wrap_exception + @checks_instance_lock def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() -- cgit From 656233762a61929d43f671e4765d52f25299562f Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 20:52:48 -0600 Subject: syntax error --- nova/compute/manager.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 224159596..cd2c95d8d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -411,8 +411,7 @@ class ComputeManager(manager.Manager): """ context = context.elevated() - logging.debug(_('instance %s: getting locked'), - instance_ref['internal_id']) + logging.debug(_('instance %s: getting locked state'), instance_id) instance_ref = self.db.instance_get(context, instance_id) return instance_ref['locked'] -- cgit From 4531600425d71659581aa549bdc5e719e41efc9e Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Wed, 29 Dec 2010 22:08:38 -0600 Subject: altered the compute lock test --- nova/compute/manager.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index cd2c95d8d..2671d401c 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -70,6 +70,7 @@ def checks_instance_lock(function): # assume worst case (have to declare so they are in scope) admin = False locked = True + instance_id = False # grab args to function try: @@ -81,17 +82,20 @@ def checks_instance_lock(function): instance_id = kwargs['instance_id'] else: instance_id = args[2] - locked = args[0].get_locked(context, instance_id) + locked = args[0].get_lock(context, instance_id) admin = context.is_admin except Exception as e: - logging.error(_("check_instance_lock: arguments: |%s| |%s|"), args, + logging.error(_("check_instance_lock fail! 
args: |%s| |%s|"), args, kwargs) raise e - # if admin or unlocked call function, otherwise 405 + # if admin or unlocked call function otherwise log error if admin or not locked: - return function(*args, **kwargs) - raise Exception(_("Instance is locked, cannot execute |%s|"), function) + function(*args, **kwargs) + else: + logging.error(_("Instance |%s| is locked, cannot execute |%s|"), + instance_id, function) + return False return decorated_function @@ -405,7 +409,7 @@ class ComputeManager(manager.Manager): self.db.instance_update(context, instance_id, {'locked': False}) @exception.wrap_exception - def get_locked(self, context, instance_id): + def get_lock(self, context, instance_id): """ return the boolean state of (instance with instance_id)'s lock -- cgit From b1a08af498ed6b52e3373a23196ded0396e6d34b Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 20:30:36 -0800 Subject: Cleaned up the compute API, mostly consistency with other parts of the system and renaming redundant module names. --- nova/compute/__init__.py | 324 +++++++++++++++++++++++++++++++++++++++++++-- nova/compute/api.py | 332 ----------------------------------------------- 2 files changed, 312 insertions(+), 344 deletions(-) delete mode 100644 nova/compute/api.py (limited to 'nova/compute') diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index a5df2ec1a..648c0ff6a 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -17,16 +17,316 @@ # under the License. """ -:mod:`nova.compute` -- Compute Nodes using LibVirt -===================================================== - -.. automodule:: nova.compute - :platform: Unix - :synopsis: Thin wrapper around libvirt for VM mgmt. -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith +Handles all requests relating to instances (guest vms). 
""" + +import datetime +import logging +import time + +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova import utils +from nova import volume +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(instance_id): + """Default function to generate a hostname given an instance reference.""" + return str(instance_id) + + +class API(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, network_manager=None, image_service=None, + volume_api=None, **kwargs): + if not network_manager: + network_manager = utils.import_object(FLAGS.network_manager) + self.network_manager = network_manager + if not image_service: + image_service = utils.import_object(FLAGS.image_service) + self.image_service = image_service + if not volume_api: + volume_api = volume.API() + self.volume_api = volume_api + super(API, self).__init__(**kwargs) + + def get_network_topic(self, context, instance_id): + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found in get_network_topic", + instance_id) + raise e + + host = instance['host'] + if not host: + raise exception.Error("Instance %d has no host" % instance_id) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) + return rpc.call(context, + topic, + {"method": "get_network_topic", "args": {'fake': 1}}) + + def create(self, context, instance_type, + image_id, kernel_id=None, ramdisk_id=None, + min_count=1, max_count=1, + display_name='', display_description='', + key_name=None, key_data=None, security_group='default', + user_data=None, generate_hostname=generate_default_hostname): + """Create the number of instances requested if quota and + other arguments check out ok.""" + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." 
% + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = self.image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernelId', None) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', None) + # No kernel and ramdisk for raw images + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + logging.debug("Creating a raw instance") + # Make sure we have access to kernel and ramdisk (if not raw) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + type_data = instance_types.INSTANCE_TYPES[instance_type] + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'image_id': image_id, + 'kernel_id': kernel_id or '', + 'ramdisk_id': ramdisk_id or '', + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': display_name, + 'display_description': display_description, + 'user_data': user_data or '', + 'key_name': key_name, + 'key_data': key_data} + + elevated = context.elevated() + instances = [] + logging.debug(_("Going to run %s instances..."), num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + instance_id, + security_group_id) + + # Set sane defaults if not specified + updates = dict(hostname=generate_hostname(instance_id)) + if 'display_name' not in instance: + updates['display_name'] = "Server %s" % instance_id + + instance = self.update(context, instance_id, **updates) + instances.append(instance) + + logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + context.project_id, context.user_id, instance_id) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + """ Create security group for the security context if it + does not already exist + + :param context: the security context + + """ + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + db.security_group_create(context, values) + + def update(self, context, instance_id, **kwargs): + """Updates the instance in the datastore. 
+ + :param context: The security context + :param instance_id: ID of the instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :retval None + + """ + return self.db.instance_update(context, instance_id, kwargs) + + def delete(self, context, instance_id): + logging.debug("Going to try and terminate %s" % instance_id) + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning(_("Instance %s was not found during terminate"), + instance_id) + raise e + + if (instance['state_description'] == 'terminating'): + logging.warning(_("Instance %s is already being terminated"), + instance_id) + return + + self.update(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) + + host = instance['host'] + if host: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"instance_id": instance['id']}}) + else: + self.db.instance_destroy(context, instance['id']) + + def get(self, context, instance_id=None, project_id=None): + """Get one or more instances, possibly filtered by project + ID or user ID. If there is no filter and the context is + an admin, it will retreive all instances in the system.""" + if instance_id is not None: + return self.db.instance_get_by_id(context, instance_id) + if project_id or not context.is_admin: + if not context.project: + return self.db.instance_get_all_by_user(context, + context.user_id) + if project_id is None: + project_id = context.project_id + return self.db.instance_get_all_by_project(context, + project_id) + return self.db.instance_get_all(context) + + def reboot(self, context, instance_id): + """Reboot the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance['id']}}) + + def pause(self, context, instance_id): + """Pause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "pause_instance", + "args": {"instance_id": instance['id']}}) + + def unpause(self, context, instance_id): + """Unpause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unpause_instance", + "args": {"instance_id": instance['id']}}) + + def rescue(self, context, instance_id): + """Rescue the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "rescue_instance", + "args": {"instance_id": instance['id']}}) + + def unrescue(self, context, instance_id): + """Unrescue the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unrescue_instance", + "args": {"instance_id": instance['id']}}) + + def attach_volume(self, context, instance_id, volume_id, device): + if not re.match("^/dev/[a-z]d[a-z]+$", device): + raise exception.ApiError(_("Invalid device specified: %s. 
" + "Example device: /dev/vdb") % device) + self.volume_api.check_attach(context, volume_id) + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + + def detach_volume(self, context, volume_id): + instance = self.db.volume_get_instance(context.elevated(), volume_id) + if not instance: + raise exception.ApiError(_("Volume isn't attached to anything!")) + self.volume_api.check_detach(context, volume_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "detach_volume", + "args": {"instance_id": instance['id'], + "volume_id": volume_id}}) + return instance diff --git a/nova/compute/api.py b/nova/compute/api.py deleted file mode 100644 index 870fcdbe4..000000000 --- a/nova/compute/api.py +++ /dev/null @@ -1,332 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all API requests relating to instances (guest vms). 
-""" - -import datetime -import logging -import time - -from nova import db -from nova import exception -from nova import flags -from nova import quota -from nova import rpc -from nova import utils -from nova import volume -from nova.compute import instance_types -from nova.db import base - -FLAGS = flags.FLAGS - - -def generate_default_hostname(instance_id): - """Default function to generate a hostname given an instance reference.""" - return str(instance_id) - - -class ComputeAPI(base.Base): - """API for interacting with the compute manager.""" - - def __init__(self, network_manager=None, image_service=None, - volume_api=None, **kwargs): - if not network_manager: - network_manager = utils.import_object(FLAGS.network_manager) - self.network_manager = network_manager - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service - if not volume_api: - volume_api = volume.API() - self.volume_api = volume_api - super(ComputeAPI, self).__init__(**kwargs) - - def get_network_topic(self, context, instance_id): - try: - instance = self.db.instance_get_by_id(context, instance_id) - except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) - raise e - - host = instance['host'] - if not host: - raise exception.Error("Instance %d has no host" % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - - def create_instances(self, context, instance_type, image_id, min_count=1, - max_count=1, kernel_id=None, ramdisk_id=None, - display_name='', description='', key_name=None, - key_data=None, security_group='default', - user_data=None, - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quote and - other arguments check out ok.""" - - num_instances = quota.allowed_instances(context, max_count, - instance_type) - if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) - #No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - logging.debug("Creating a raw instance") - # Make sure we have access to kernel and ramdisk (if not raw) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - type_data = instance_types.INSTANCE_TYPES[instance_type] - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, - 'kernel_id': kernel_id or '', - 'ramdisk_id': ramdisk_id or '', - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': display_name, - 'display_description': description, - 'user_data': user_data or '', - 'key_name': key_name, - 'key_data': key_data} - - elevated = context.elevated() - instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - instance_id, - security_group_id) - - # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: - updates['display_name'] = "Server %s" % instance_id - - instance = self.update_instance(context, instance_id, **updates) - instances.append(instance) - - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - """ Create security group for the security context if it - does not already exist - - :param context: the security context - - """ - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 'project_id': context.project_id} - db.security_group_create(context, values) - - def update_instance(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. 
- - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - return self.db.instance_update(context, instance_id, kwargs) - - def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) - try: - instance = self.db.instance_get_by_id(context, instance_id) - except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) - raise e - - if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) - return - - self.update_instance(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=datetime.datetime.utcnow()) - - host = instance['host'] - if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance['id']}}) - else: - self.db.instance_destroy(context, instance['id']) - - def get_instances(self, context, project_id=None): - """Get all instances, possibly filtered by project ID or - user ID. If there is no filter and the context is an admin, - it will retreive all instances in the system.""" - if project_id or not context.is_admin: - if not context.project: - return self.db.instance_get_all_by_user(context, - context.user_id) - if project_id is None: - project_id = context.project_id - return self.db.instance_get_all_by_project(context, project_id) - return self.db.instance_get_all(context) - - def get_instance(self, context, instance_id): - return self.db.instance_get_by_id(context, instance_id) - - def reboot(self, context, instance_id): - """Reboot the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance['id']}}) - - def pause(self, context, instance_id): - """Pause the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance['id']}}) - - def unpause(self, context, instance_id): - """Unpause the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance['id']}}) - - def rescue(self, context, instance_id): - """Rescue the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance['id']}}) - - def unrescue(self, context, instance_id): - """Unrescue the given instance.""" - instance = self.db.instance_get_by_id(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance['id']}}) - - def attach_volume(self, context, instance_id, volume_id, device): - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise 
exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id) - instance = self.get_instance(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_id, - "instance_id": instance_id, - "mountpoint": device}}) - - def detach_volume(self, context, volume_id): - instance = self.db.volume_get_instance(context.elevated(), volume_id) - if not instance: - raise exception.ApiError(_("Volume isn't attached to anything!")) - self.volume_api.check_detach(context, volume_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance['id'], - "volume_id": volume_id}}) - return instance -- cgit From 750a0c9b413ad3912d522355332cffadd9667d0c Mon Sep 17 00:00:00 2001 From: Eric Day Date: Wed, 29 Dec 2010 21:41:42 -0800 Subject: Converted a few more ec2 calls to use compute api. --- nova/compute/__init__.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index 648c0ff6a..fd1cdcd1b 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -243,12 +243,18 @@ class API(base.Base): else: self.db.instance_destroy(context, instance['id']) - def get(self, context, instance_id=None, project_id=None): - """Get one or more instances, possibly filtered by project - ID or user ID. If there is no filter and the context is + def get(self, context, instance_id=None, project_id=None, + reservation_id=None, fixed_ip=None): + """Get one or more instances, possibly filtered by one of the + given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system.""" if instance_id is not None: return self.db.instance_get_by_id(context, instance_id) + if reservation_id is not None: + return self.db.instance_get_all_by_reservation(context, + reservation_id) + if fixed_ip is not None: + return self.db.fixed_ip_get_instance(context, fixed_ip) if project_id or not context.is_admin: if not context.project: return self.db.instance_get_all_by_user(context, -- cgit From 71d715d422a746f4951877d8ff76e0ace355281e Mon Sep 17 00:00:00 2001 From: Eric Day Date: Thu, 30 Dec 2010 15:43:41 -0800 Subject: Moved network operation code in ec2 api into a generic network API class. Removed a circular dependency with compute/quota. 
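The compute side of this change appears below: API.__init__ now accepts a network_api handle (defaulting to network.API()) instead of importing a network manager object, and the new associate_floating_ip method delegates to it. The network-side facade itself is not part of this diff; the following is a minimal sketch of what such a class might look like, mirroring the rpc.cast pattern used throughout nova/compute/api.py. The network_topic flag and the method signature here are assumptions for illustration, not the real nova/network/api.py.

    from nova import flags
    from nova import rpc
    from nova.db import base

    FLAGS = flags.FLAGS


    class API(base.Base):
        """Sketch of a generic network API facade (assumed interface)."""

        def associate_floating_ip(self, context, floating_ip, fixed_ip):
            # Cast to the network service instead of calling a network
            # manager in-process; FLAGS.network_topic is assumed here.
            rpc.cast(context,
                     FLAGS.network_topic,
                     {"method": "associate_floating_ip",
                      "args": {"floating_address": floating_ip,
                               "fixed_address": fixed_ip}})

With compute depending only on this thin facade, the commit message notes the circular dependency with compute/quota is gone; relatedly, the hunk below shows quota.allowed_instances now receiving type_data['vcpus'] directly rather than the instance type name.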
--- nova/compute/__init__.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index fd1cdcd1b..14c242641 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -27,6 +27,7 @@ import time from nova import db from nova import exception from nova import flags +from nova import network from nova import quota from nova import rpc from nova import utils @@ -45,14 +46,14 @@ def generate_default_hostname(instance_id): class API(base.Base): """API for interacting with the compute manager.""" - def __init__(self, network_manager=None, image_service=None, - volume_api=None, **kwargs): - if not network_manager: - network_manager = utils.import_object(FLAGS.network_manager) - self.network_manager = network_manager + def __init__(self, image_service=None, network_api=None, volume_api=None, + **kwargs): if not image_service: image_service = utils.import_object(FLAGS.image_service) self.image_service = image_service + if not network_api: + network_api = network.API() + self.network_api = network_api if not volume_api: volume_api = volume.API() self.volume_api = volume_api @@ -83,8 +84,9 @@ class API(base.Base): """Create the number of instances requested if quota and other arguments check out ok.""" + type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, - instance_type) + type_data['vcpus']) if num_instances < min_count: logging.warn("Quota exceeeded for %s, tried to run %s instances", context.project_id, min_count) @@ -127,7 +129,6 @@ class API(base.Base): key_pair = db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] - type_data = instance_types.INSTANCE_TYPES[instance_type] base_options = { 'reservation_id': utils.generate_uid('r'), 'image_id': image_id, @@ -336,3 +337,8 @@ class API(base.Base): "args": {"instance_id": instance['id'], "volume_id": volume_id}}) return instance + + def associate_floating_ip(self, context, instance_id, address): + instance = self.get(context, instance_id) + self.network_api.associate_floating_ip(context, address, + instance['fixed_ip']) -- cgit From d1118830c01267082c1371ef2faad1057e7a811e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 3 Jan 2011 13:55:44 +0100 Subject: Stop returning generators in the refresh_security_group_{rules,members} methods. 
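The two methods touched below each contained a bare yield, and any yield makes a function a generator: calling it only builds a generator object, so the driver call inside does not execute until something iterates the result. Dispatched synchronously over RPC with nothing consuming the generator, the refresh silently never ran. A self-contained illustration of the difference (FakeDriver is a stand-in, not a nova class):

    class FakeDriver(object):
        def refresh(self, group_id):
            print("refreshing group %s" % group_id)
            return True


    def refresh_with_yield(driver, group_id):
        # The yield makes this a generator function; the body is deferred.
        yield driver.refresh(group_id)


    def refresh_with_return(driver, group_id):
        # A plain return executes eagerly, as callers expect.
        return driver.refresh(group_id)


    driver = FakeDriver()
    gen = refresh_with_yield(driver, 1)   # prints nothing yet
    list(gen)                             # only now prints "refreshing group 1"
    refresh_with_return(driver, 2)        # prints "refreshing group 2" at once

The yield style appears to be a leftover from the Twisted inlineCallbacks idiom used elsewhere in nova at the time; without a decorator consuming the generator, return is the correct form.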
--- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6615ad65b..235237091 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -103,13 +103,13 @@ class ComputeManager(manager.Manager): def refresh_security_group_rules(self, context, security_group_id, **_kwargs): """This call passes straight through to the virtualization driver.""" - yield self.driver.refresh_security_group_rules(security_group_id) + return self.driver.refresh_security_group_rules(security_group_id) @exception.wrap_exception def refresh_security_group_members(self, context, security_group_id, **_kwargs): """This call passes straight through to the virtualization driver.""" - yield self.driver.refresh_security_group_members(security_group_id) + return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): -- cgit From 6a8f011789ddad57726ce55962b51a04a69fe527 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 3 Jan 2011 16:08:52 -0600 Subject: Fixes LP688545 --- nova/compute/manager.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c9aff75ac..6e8f34347 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -327,6 +327,7 @@ class ComputeManager(manager.Manager): instance_ref["internal_id"]) return self.driver.get_diagnostics(instance_ref) + @exception.wrap_exception def suspend_instance(self, context, instance_id): """suspend the instance with instance_id""" context = context.elevated() -- cgit From c7305af78049f94dedcbb55480b91a3c6d843b9f Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:23:35 -0500 Subject: Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch. --- nova/compute/api.py | 33 ++++++++++----------- nova/compute/disk.py | 11 +++---- nova/compute/manager.py | 76 ++++++++++++++++++++++++++++--------------------- nova/compute/monitor.py | 39 ++++++++++++------------- 4 files changed, 86 insertions(+), 73 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/api.py b/nova/compute/api.py index 07c69bd31..c0141e569 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -21,12 +21,12 @@ Handles all API requests relating to instances (guest vms). 
""" import datetime -import logging import time from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import rpc from nova import utils @@ -34,6 +34,7 @@ from nova.compute import instance_types from nova.db import base FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.compute.api') def generate_default_hostname(internal_id): @@ -58,13 +59,13 @@ class ComputeAPI(base.Base): instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) + LOG.warning(_("Instance %d was not found in get_network_topic"), + instance_id) raise e host = instance['host'] if not host: - raise exception.Error("Instance %d has no host" % instance_id) + raise exception.Error(_("Instance %d has no host") % instance_id) topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) return rpc.call(context, topic, @@ -83,10 +84,10 @@ class ComputeAPI(base.Base): num_instances = quota.allowed_instances(context, max_count, instance_type) if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." % + LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"), + context.project_id, min_count) + raise quota.QuotaError(_("Instance quota exceeded. You can only " + "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") is_vpn = image_id == FLAGS.vpn_image_id @@ -100,7 +101,7 @@ class ComputeAPI(base.Base): if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None - logging.debug("Creating a raw instance") + LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) if kernel_id: self.image_service.show(context, kernel_id) @@ -147,7 +148,7 @@ class ComputeAPI(base.Base): elevated = context.elevated() instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) + LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -172,7 +173,7 @@ class ComputeAPI(base.Base): instance = self.update_instance(context, instance_id, **updates) instances.append(instance) - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, @@ -214,18 +215,18 @@ class ComputeAPI(base.Base): return self.db.instance_update(context, instance_id, kwargs) def delete_instance(self, context, instance_id): - logging.debug("Going to try and terminate %d" % instance_id) + LOG.debug(_("Going to try and terminate %d"), instance_id) try: instance = self.db.instance_get_by_internal_id(context, instance_id) except exception.NotFound as e: - logging.warning(_("Instance %d was not found during terminate"), - instance_id) + LOG.warning(_("Instance %d was not found during terminate"), + instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %d is already being terminated"), - instance_id) + LOG.warning(_("Instance %d is already being terminated"), + instance_id) return self.update_instance(context, diff --git a/nova/compute/disk.py b/nova/compute/disk.py index 
814a258cd..741499294 100644 --- a/nova/compute/disk.py +++ b/nova/compute/disk.py @@ -22,14 +22,15 @@ Includes injection of SSH PGP keys into authorized_keys file. """ -import logging import os import tempfile from nova import exception from nova import flags +from nova import log as logging +LOG = logging.getLogger('nova.compute.disk') FLAGS = flags.FLAGS flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, 'minimum size in bytes of root partition') @@ -67,12 +68,12 @@ def partition(infile, outfile, local_bytes=0, resize=True, execute('resize2fs %s' % infile) file_size = FLAGS.minimum_root_size elif file_size % sector_size != 0: - logging.warn(_("Input partition size not evenly divisible by" - " sector size: %d / %d"), file_size, sector_size) + LOG.warn(_("Input partition size not evenly divisible by" + " sector size: %d / %d"), file_size, sector_size) primary_sectors = file_size / sector_size if local_bytes % sector_size != 0: - logging.warn(_("Bytes for local storage not evenly divisible" - " by sector size: %d / %d"), local_bytes, sector_size) + LOG.warn(_("Bytes for local storage not evenly divisible" + " by sector size: %d / %d"), local_bytes, sector_size) local_sectors = local_bytes / sector_size mbr_last = 62 # a diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 6e8f34347..cc5724346 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,10 +35,10 @@ terminating it. """ import datetime -import logging from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils @@ -53,6 +53,9 @@ flags.DEFINE_string('stub_network', False, 'Stub network related code') +LOG = logging.getLogger('nova.computemanager') + + class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" @@ -111,7 +114,7 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - logging.debug(_("instance %s: starting..."), instance_id) + LOG.debug(_("instance %s: starting..."), instance_id) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -149,8 +152,8 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s: Failed to spawn"), + instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -161,14 +164,16 @@ class ComputeManager(manager.Manager): def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Terminating instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) if not FLAGS.stub_network: address = self.db.instance_get_floating_address(context, instance_ref['id']) if address: - logging.debug(_("Disassociating address %s") % address) + LOG.debug(_("Disassociating address %s"), address, + context=context) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. 
@@ -180,14 +185,15 @@ class ComputeManager(manager.Manager): address = self.db.instance_get_fixed_address(context, instance_ref['id']) if address: - logging.debug(_("Deallocating address %s") % address) + LOG.debug(_("Deallocating address %s"), address, + context=context) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. self.network_manager.deallocate_fixed_ip(context.elevated(), address) - logging.debug(_("instance %s: terminating"), instance_id) + LOG.debug(_("instance %s: terminating"), instance_id, context=context) volumes = instance_ref.get('volumes', []) or [] for volume in volumes: @@ -207,15 +213,18 @@ class ComputeManager(manager.Manager): context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Rebooting instance %s//%s"), instance_ref['internal_id'], + instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_ref['internal_id'], - instance_ref['state'], - power_state.RUNNING) - - logging.debug(_('instance %s: rebooting'), instance_ref['name']) + LOG.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s excepted: %s)'), + instance_ref['internal_id'], + instance_ref['state'], + power_state.RUNNING, + context=context) + + LOG.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -251,8 +260,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: rescuing'), - instance_ref['internal_id']) + LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -267,8 +276,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: unrescuing'), - instance_ref['internal_id']) + LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -287,8 +296,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: pausing', - instance_ref['internal_id']) + LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -305,8 +314,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug('instance %s: unpausing', - instance_ref['internal_id']) + LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'], + context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -364,8 +373,9 @@ class ComputeManager(manager.Manager): def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Get console output instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) return 
self.driver.get_console_output(instance_ref) @@ -373,8 +383,8 @@ class ComputeManager(manager.Manager): def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint) + LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, + volume_id, mountpoint, context=context) instance_ref = self.db.instance_get(context, instance_id) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) @@ -390,8 +400,8 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - logging.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint) + LOG.exception(_("instance %s: attach failed %s, removing"), + instance_id, mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -402,14 +412,14 @@ class ComputeManager(manager.Manager): def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug(_("instance %s: detaching volume %s"), - instance_id, - volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) + LOG.audit(_("Detach volume %s from mountpoint %s on instance %s//%s"), + volume_id, volume_ref['mountpoint'], + instance_ref['internal_id'], instance_id, context=context) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name']) + LOG.warn(_("Detaching volume from unknown instance %s"), + instance_ref['name'], context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 60c347a5e..cc94e44f4 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -25,19 +25,17 @@ Instance Monitoring: """ import datetime -import logging import os -import sys import time import boto import boto.s3 import rrdtool -from twisted.internet import defer from twisted.internet import task from twisted.application import service from nova import flags +from nova import log as logging from nova.virt import connection as virt_connection @@ -91,6 +89,9 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow +LOG = logging.getLogger('nova.instancemonitor') + + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -255,20 +256,20 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. 
""" - logging.debug(_('updating %s...'), self.instance_id) + LOG.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() if data != None: - logging.debug('CPU: %s', data) + LOG.debug('CPU: %s', data) update_rrd(self, 'cpu', data) data = self.fetch_net_stats() - logging.debug('NET: %s', data) + LOG.debug('NET: %s', data) update_rrd(self, 'net', data) data = self.fetch_disk_stats() - logging.debug('DISK: %s', data) + LOG.debug('DISK: %s', data) update_rrd(self, 'disk', data) # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls @@ -285,7 +286,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception(_('unexpected error during update')) + LOG.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -309,7 +310,7 @@ class Instance(object): self.cputime = float(info['cpu_time']) self.cputime_last_updated = utcnow() - logging.debug('CPU: %d', self.cputime) + LOG.debug('CPU: %d', self.cputime) # Skip calculation on first pass. Need delta to get a meaningful value. if cputime_last_updated == None: @@ -319,17 +320,17 @@ class Instance(object): d = self.cputime_last_updated - cputime_last_updated t = d.days * 86400 + d.seconds - logging.debug('t = %d', t) + LOG.debug('t = %d', t) # Calculate change over time in number of nanoseconds of CPU time used. cputime_delta = self.cputime - cputime_last - logging.debug('cputime_delta = %s', cputime_delta) + LOG.debug('cputime_delta = %s', cputime_delta) # Get the number of virtual cpus in this domain. vcpus = int(info['num_cpu']) - logging.debug('vcpus = %d', vcpus) + LOG.debug('vcpus = %d', vcpus) # Calculate CPU % used and cap at 100. return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) @@ -351,8 +352,8 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error(_('Cannot get blockstats for "%s" on "%s"'), - disk, self.instance_id) + LOG.error(_('Cannot get blockstats for "%s" on "%s"'), + disk, self.instance_id) raise return '%d:%d' % (rd, wr) @@ -373,8 +374,8 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error(_('Cannot get ifstats for "%s" on "%s"'), - interface, self.instance_id) + LOG.error(_('Cannot get ifstats for "%s" on "%s"'), + interface, self.instance_id) raise return '%d:%d' % (rx, tx) @@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception(_('unexpected exception getting connection')) + LOG.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service): try: self.updateInstances_(conn, domain_ids) except Exception, exn: - logging.exception('updateInstances_') + LOG.exception('updateInstances_') def updateInstances_(self, conn, domain_ids): for domain_id in domain_ids: if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug(_('Found instance: %s'), domain_id) + LOG.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] -- cgit From b9576a9f73195656f4a0a1327cd6bee3c4a6b6c9 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:26:41 -0500 Subject: Final few log tweaks, i18n, levels, including contexts, etc. 
--- nova/compute/manager.py | 77 ++++++++++++++++++++++++------------------------- nova/compute/monitor.py | 2 +- 2 files changed, 39 insertions(+), 40 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index cc5724346..0098ded74 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -52,8 +52,7 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', flags.DEFINE_string('stub_network', False, 'Stub network related code') - -LOG = logging.getLogger('nova.computemanager') +LOG = logging.getLogger('nova.compute.manager') class ComputeManager(manager.Manager): @@ -114,7 +113,8 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - LOG.debug(_("instance %s: starting..."), instance_id) + LOG.audit(_("instance %s//%s: starting..."), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -152,8 +152,9 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - LOG.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s//%s: Failed to spawn"), + instance_ref['internal_id'], instance_id, + context=context) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -193,8 +194,6 @@ class ComputeManager(manager.Manager): self.network_manager.deallocate_fixed_ip(context.elevated(), address) - LOG.debug(_("instance %s: terminating"), instance_id, context=context) - volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) @@ -217,14 +216,13 @@ class ComputeManager(manager.Manager): instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - LOG.warn(_('trying to reboot a non-running ' + .warn(_('trying to reboot a non-running ' 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], power_state.RUNNING, context=context) - LOG.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -244,13 +242,14 @@ class ComputeManager(manager.Manager): # potentially? 
self._update_state(context, instance_id) - logging.debug(_('instance %s: snapshotting'), instance_ref['name']) + LOG.audit(_('instance %s//%s: snapshotting'), + instance_ref['internal_id'], instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_ref['internal_id'], - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to snapshot a non-running ' + 'instance: %s//%s (state: %s excepted: %s)'), + instance_ref['internal_id'], instance_id, + instance_ref['state'], + power_state.RUNNING) self.driver.snapshot(instance_ref, name) @@ -259,9 +258,8 @@ class ComputeManager(manager.Manager): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: rescuing'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -276,8 +274,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: unrescuing'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -295,9 +293,8 @@ class ComputeManager(manager.Manager): """Pause an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: pausing'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -313,9 +310,8 @@ class ComputeManager(manager.Manager): """Unpause a paused instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'], - context=context) + LOG.audit(_('instance %s//%s: unpausing'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -332,8 +328,9 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: - logging.debug(_("instance %s: retrieving diagnostics"), - instance_ref["internal_id"]) + LOG.audit(_("instance %s//%s: retrieving diagnostics"), + instance_ref["internal_id"], instance_id, + context=context) return self.driver.get_diagnostics(instance_ref) @exception.wrap_exception @@ -342,8 +339,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: suspending'), - instance_ref['internal_id']) + LOG.audit(_('instance %s//%s: suspending'), + instance_ref['internal_id'], instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'suspending') @@ -359,7 +356,8 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: resuming'), instance_ref['internal_id']) + 
LOG.audit(_('instance %s//%s: resuming'), instance_ref['internal_id'], + instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'resuming') @@ -374,18 +372,18 @@ class ComputeManager(manager.Manager): """Send the console output for an instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("Get console output instance %s//%s"), + LOG.audit(_("Get console output for instance %s//%s"), instance_ref['internal_id'], instance_id, context=context) - return self.driver.get_console_output(instance_ref) @exception.wrap_exception def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint, context=context) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("instance %s//%s: attaching volume %s to %s"), + instance_ref['internal_id'], instance_id, + volume_id, mountpoint, context=context) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -400,8 +398,9 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. - LOG.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint, context=context) + LOG.exception(_("instance %s//%s: attach failed %s, removing"), + instance_ref['internal_id'], instance_id, + mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -418,8 +417,8 @@ class ComputeManager(manager.Manager): volume_id, volume_ref['mountpoint'], instance_ref['internal_id'], instance_id, context=context) if instance_ref['name'] not in self.driver.list_instances(): - LOG.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name'], context=context) + LOG.warn(_("Detaching volume from unknown instance %s//%s"), + instance_ref['internal_id'], instance_id, context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index cc94e44f4..14d0e8ca1 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -89,7 +89,7 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow -LOG = logging.getLogger('nova.instancemonitor') +LOG = logging.getLogger('nova.compute.monitor') def update_rrd(instance, name, data): -- cgit From 45f2f563d1722d2f4d81d49de9d6a3cfd3d0fe3e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 4 Jan 2011 00:37:47 -0500 Subject: Don't know where that LOG went... 
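The previous patch left a bare .warn( in manager.py with no LOG receiver. That is not merely the wrong call target; an expression cannot begin with a dot, so it is a SyntaxError, nova/compute/manager.py fails to byte-compile, and the compute service dies on import. Regressions of this class can be caught without starting any service by compiling the file; a small sketch using only the standard library (the path is illustrative):

    import py_compile

    try:
        py_compile.compile('nova/compute/manager.py', doraise=True)
    except py_compile.PyCompileError as err:
        # Reports the SyntaxError with the file name and offending line.
        print(err)

Running this across the tree (or simply python -m compileall nova) before committing would have flagged the stray .warn( immediately.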
--- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0098ded74..d68801146 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -216,7 +216,7 @@ class ComputeManager(manager.Manager): instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - .warn(_('trying to reboot a non-running ' + LOG.warn(_('trying to reboot a non-running ' 'instance: %s (state: %s excepted: %s)'), instance_ref['internal_id'], instance_ref['state'], -- cgit From e97cb0f19f66ee4d28685575cea57b1eb32c4ed3 Mon Sep 17 00:00:00 2001 From: Eric Day Date: Tue, 4 Jan 2011 13:56:36 -0800 Subject: Moved __init__ api code to api.py and changed allowed_instances quota method argument to accept all type data, not just vcpu count. --- nova/compute/__init__.py | 369 +-------------------------------------------- nova/compute/api.py | 385 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 386 insertions(+), 368 deletions(-) create mode 100644 nova/compute/api.py (limited to 'nova/compute') diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py index 9efd90909..b94f971d1 100644 --- a/nova/compute/__init__.py +++ b/nova/compute/__init__.py @@ -16,371 +16,4 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). -""" - -import datetime -import logging -import time - -from nova import db -from nova import exception -from nova import flags -from nova import network -from nova import quota -from nova import rpc -from nova import utils -from nova import volume -from nova.compute import instance_types -from nova.db import base - -FLAGS = flags.FLAGS - - -def generate_default_hostname(instance_id): - """Default function to generate a hostname given an instance reference.""" - return str(instance_id) - - -class API(base.Base): - """API for interacting with the compute manager.""" - - def __init__(self, image_service=None, network_api=None, volume_api=None, - **kwargs): - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service - if not network_api: - network_api = network.API() - self.network_api = network_api - if not volume_api: - volume_api = volume.API() - self.volume_api = volume_api - super(API, self).__init__(**kwargs) - - def get_network_topic(self, context, instance_id): - try: - instance = self.get(context, instance_id) - except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) - raise e - - host = instance['host'] - if not host: - raise exception.Error("Instance %d has no host" % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - - def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, - display_name='', display_description='', - key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, - generate_hostname=generate_default_hostname): - """Create the number of instances requested if quota and - other arguments check out ok.""" - - type_data = instance_types.INSTANCE_TYPES[instance_type] - num_instances = quota.allowed_instances(context, max_count, - type_data['vcpus']) - if num_instances < min_count: - 
logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." % - num_instances, "InstanceLimitExceeded") - - is_vpn = image_id == FLAGS.vpn_image_id - if not is_vpn: - image = self.image_service.show(context, image_id) - if kernel_id is None: - kernel_id = image.get('kernelId', None) - if ramdisk_id is None: - ramdisk_id = image.get('ramdiskId', None) - # No kernel and ramdisk for raw images - if kernel_id == str(FLAGS.null_kernel): - kernel_id = None - ramdisk_id = None - logging.debug("Creating a raw instance") - # Make sure we have access to kernel and ramdisk (if not raw) - if kernel_id: - self.image_service.show(context, kernel_id) - if ramdisk_id: - self.image_service.show(context, ramdisk_id) - - if security_group is None: - security_group = ['default'] - if not type(security_group) is list: - security_group = [security_group] - - security_groups = [] - self.ensure_default_security_group(context) - for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) - security_groups.append(group['id']) - - if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) - key_data = key_pair['public_key'] - - base_options = { - 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, - 'kernel_id': kernel_id or '', - 'ramdisk_id': ramdisk_id or '', - 'state_description': 'scheduling', - 'user_id': context.user_id, - 'project_id': context.project_id, - 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], - 'display_name': display_name, - 'display_description': display_description, - 'user_data': user_data or '', - 'key_name': key_name, - 'key_data': key_data, - 'availability_zone': availability_zone} - - elevated = context.elevated() - instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) - for num in range(num_instances): - instance = dict(mac_address=utils.generate_mac(), - launch_index=num, - **base_options) - instance = self.db.instance_create(context, instance) - instance_id = instance['id'] - - elevated = context.elevated() - if not security_groups: - security_groups = [] - for security_group_id in security_groups: - self.db.instance_add_security_group(elevated, - instance_id, - security_group_id) - - # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: - updates['display_name'] = "Server %s" % instance_id - - instance = self.update(context, instance_id, **updates) - instances.append(instance) - - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), - context.project_id, context.user_id, instance_id) - rpc.cast(context, - FLAGS.scheduler_topic, - {"method": "run_instance", - "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) - - return instances - - def ensure_default_security_group(self, context): - """ Create security group for the security context if it - does not already exist - - :param context: the security context - - """ - try: - db.security_group_get_by_name(context, context.project_id, - 'default') - except exception.NotFound: - values = {'name': 'default', - 'description': 'default', - 'user_id': context.user_id, - 
'project_id': context.project_id} - db.security_group_create(context, values) - - def update(self, context, instance_id, **kwargs): - """Updates the instance in the datastore. - - :param context: The security context - :param instance_id: ID of the instance to update - :param kwargs: All additional keyword args are treated - as data fields of the instance to be - updated - - :retval None - - """ - return self.db.instance_update(context, instance_id, kwargs) - - def delete(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) - try: - instance = self.get(context, instance_id) - except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) - raise e - - if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) - return - - self.update(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=datetime.datetime.utcnow()) - - host = instance['host'] - if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance_id}}) - else: - self.db.instance_destroy(context, instance_id) - - def get(self, context, instance_id=None, project_id=None, - reservation_id=None, fixed_ip=None): - """Get one or more instances, possibly filtered by one of the - given parameters. If there is no filter and the context is - an admin, it will retreive all instances in the system.""" - if instance_id is not None: - return self.db.instance_get_by_id(context, instance_id) - if reservation_id is not None: - return self.db.instance_get_all_by_reservation(context, - reservation_id) - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - if project_id or not context.is_admin: - if not context.project: - return self.db.instance_get_all_by_user(context, - context.user_id) - if project_id is None: - project_id = context.project_id - return self.db.instance_get_all_by_project(context, - project_id) - return self.db.instance_get_all(context) - - def snapshot(self, context, instance_id, name): - """Snapshot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "snapshot_instance", - "args": {"instance_id": instance_id, "name": name}}) - - def reboot(self, context, instance_id): - """Reboot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance_id}}) - - def pause(self, context, instance_id): - """Pause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance_id}}) - - def unpause(self, context, instance_id): - """Unpause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance_id}}) - - def get_diagnostics(self, context, instance_id): - """Retrieve diagnostics for the given instance.""" - instance = self.get(context, instance_id) - host = 
instance["host"] - return rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "get_diagnostics", - "args": {"instance_id": instance_id}}) - - def get_actions(self, context, instance_id): - """Retrieve actions for the given instance.""" - return self.db.instance_get_actions(context, instance_id) - - def suspend(self, context, instance_id): - """suspend the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "suspend_instance", - "args": {"instance_id": instance_id}}) - - def resume(self, context, instance_id): - """resume the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "resume_instance", - "args": {"instance_id": instance_id}}) - - def rescue(self, context, instance_id): - """Rescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance_id}}) - - def unrescue(self, context, instance_id): - """Unrescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance_id}}) - - def attach_volume(self, context, instance_id, volume_id, device): - if not re.match("^/dev/[a-z]d[a-z]+$", device): - raise exception.ApiError(_("Invalid device specified: %s. " - "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id) - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "attach_volume", - "args": {"volume_id": volume_id, - "instance_id": instance_id, - "mountpoint": device}}) - - def detach_volume(self, context, volume_id): - instance = self.db.volume_get_instance(context.elevated(), volume_id) - if not instance: - raise exception.ApiError(_("Volume isn't attached to anything!")) - self.volume_api.check_detach(context, volume_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "detach_volume", - "args": {"instance_id": instance['id'], - "volume_id": volume_id}}) - return instance - - def associate_floating_ip(self, context, instance_id, address): - instance = self.get(context, instance_id) - self.network_api.associate_floating_ip(context, address, - instance['fixed_ip']) +from nova.compute.api import API diff --git a/nova/compute/api.py b/nova/compute/api.py new file mode 100644 index 000000000..74d030c4d --- /dev/null +++ b/nova/compute/api.py @@ -0,0 +1,385 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to instances (guest vms). +""" + +import datetime +import logging +import time + +from nova import db +from nova import exception +from nova import flags +from nova import network +from nova import quota +from nova import rpc +from nova import utils +from nova import volume +from nova.compute import instance_types +from nova.db import base + +FLAGS = flags.FLAGS + + +def generate_default_hostname(instance_id): + """Default function to generate a hostname given an instance reference.""" + return str(instance_id) + + +class API(base.Base): + """API for interacting with the compute manager.""" + + def __init__(self, image_service=None, network_api=None, volume_api=None, + **kwargs): + if not image_service: + image_service = utils.import_object(FLAGS.image_service) + self.image_service = image_service + if not network_api: + network_api = network.API() + self.network_api = network_api + if not volume_api: + volume_api = volume.API() + self.volume_api = volume_api + super(API, self).__init__(**kwargs) + + def get_network_topic(self, context, instance_id): + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning("Instance %d was not found in get_network_topic", + instance_id) + raise e + + host = instance['host'] + if not host: + raise exception.Error("Instance %d has no host" % instance_id) + topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) + return rpc.call(context, + topic, + {"method": "get_network_topic", "args": {'fake': 1}}) + + def create(self, context, instance_type, + image_id, kernel_id=None, ramdisk_id=None, + min_count=1, max_count=1, + display_name='', display_description='', + key_name=None, key_data=None, security_group='default', + availability_zone=None, user_data=None, + generate_hostname=generate_default_hostname): + """Create the number of instances requested if quota and + other arguments check out ok.""" + + type_data = instance_types.INSTANCE_TYPES[instance_type] + num_instances = quota.allowed_instances(context, max_count, type_data) + if num_instances < min_count: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project_id, min_count) + raise quota.QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." 
% + num_instances, "InstanceLimitExceeded") + + is_vpn = image_id == FLAGS.vpn_image_id + if not is_vpn: + image = self.image_service.show(context, image_id) + if kernel_id is None: + kernel_id = image.get('kernelId', None) + if ramdisk_id is None: + ramdisk_id = image.get('ramdiskId', None) + # No kernel and ramdisk for raw images + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + logging.debug("Creating a raw instance") + # Make sure we have access to kernel and ramdisk (if not raw) + if kernel_id: + self.image_service.show(context, kernel_id) + if ramdisk_id: + self.image_service.show(context, ramdisk_id) + + if security_group is None: + security_group = ['default'] + if not type(security_group) is list: + security_group = [security_group] + + security_groups = [] + self.ensure_default_security_group(context) + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + + if key_data is None and key_name: + key_pair = db.key_pair_get(context, context.user_id, key_name) + key_data = key_pair['public_key'] + + base_options = { + 'reservation_id': utils.generate_uid('r'), + 'image_id': image_id, + 'kernel_id': kernel_id or '', + 'ramdisk_id': ramdisk_id or '', + 'state_description': 'scheduling', + 'user_id': context.user_id, + 'project_id': context.project_id, + 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), + 'instance_type': instance_type, + 'memory_mb': type_data['memory_mb'], + 'vcpus': type_data['vcpus'], + 'local_gb': type_data['local_gb'], + 'display_name': display_name, + 'display_description': display_description, + 'user_data': user_data or '', + 'key_name': key_name, + 'key_data': key_data, + 'availability_zone': availability_zone} + + elevated = context.elevated() + instances = [] + logging.debug(_("Going to run %s instances..."), num_instances) + for num in range(num_instances): + instance = dict(mac_address=utils.generate_mac(), + launch_index=num, + **base_options) + instance = self.db.instance_create(context, instance) + instance_id = instance['id'] + + elevated = context.elevated() + if not security_groups: + security_groups = [] + for security_group_id in security_groups: + self.db.instance_add_security_group(elevated, + instance_id, + security_group_id) + + # Set sane defaults if not specified + updates = dict(hostname=generate_hostname(instance_id)) + if 'display_name' not in instance: + updates['display_name'] = "Server %s" % instance_id + + instance = self.update(context, instance_id, **updates) + instances.append(instance) + + logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + context.project_id, context.user_id, instance_id) + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + + return instances + + def ensure_default_security_group(self, context): + """ Create security group for the security context if it + does not already exist + + :param context: the security context + + """ + try: + db.security_group_get_by_name(context, context.project_id, + 'default') + except exception.NotFound: + values = {'name': 'default', + 'description': 'default', + 'user_id': context.user_id, + 'project_id': context.project_id} + db.security_group_create(context, values) + + def update(self, context, instance_id, **kwargs): + """Updates the instance in the datastore. 
+ + :param context: The security context + :param instance_id: ID of the instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :retval None + + """ + return self.db.instance_update(context, instance_id, kwargs) + + def delete(self, context, instance_id): + logging.debug("Going to try and terminate %s" % instance_id) + try: + instance = self.get(context, instance_id) + except exception.NotFound as e: + logging.warning(_("Instance %s was not found during terminate"), + instance_id) + raise e + + if (instance['state_description'] == 'terminating'): + logging.warning(_("Instance %s is already being terminated"), + instance_id) + return + + self.update(context, + instance['id'], + state_description='terminating', + state=0, + terminated_at=datetime.datetime.utcnow()) + + host = instance['host'] + if host: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"instance_id": instance_id}}) + else: + self.db.instance_destroy(context, instance_id) + + def get(self, context, instance_id=None, project_id=None, + reservation_id=None, fixed_ip=None): + """Get one or more instances, possibly filtered by one of the + given parameters. If there is no filter and the context is + an admin, it will retreive all instances in the system.""" + if instance_id is not None: + return self.db.instance_get_by_id(context, instance_id) + if reservation_id is not None: + return self.db.instance_get_all_by_reservation(context, + reservation_id) + if fixed_ip is not None: + return self.db.fixed_ip_get_instance(context, fixed_ip) + if project_id or not context.is_admin: + if not context.project: + return self.db.instance_get_all_by_user(context, + context.user_id) + if project_id is None: + project_id = context.project_id + return self.db.instance_get_all_by_project(context, + project_id) + return self.db.instance_get_all(context) + + def snapshot(self, context, instance_id, name): + """Snapshot the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "snapshot_instance", + "args": {"instance_id": instance_id, "name": name}}) + + def reboot(self, context, instance_id): + """Reboot the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"instance_id": instance_id}}) + + def pause(self, context, instance_id): + """Pause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "pause_instance", + "args": {"instance_id": instance_id}}) + + def unpause(self, context, instance_id): + """Unpause the given instance.""" + instance = self.get(context, instance_id) + host = instance['host'] + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "unpause_instance", + "args": {"instance_id": instance_id}}) + + def get_diagnostics(self, context, instance_id): + """Retrieve diagnostics for the given instance.""" + instance = self.get(context, instance_id) + host = instance["host"] + return rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "get_diagnostics", + "args": {"instance_id": instance_id}}) + 
+    def get_actions(self, context, instance_id):
+        """Retrieve actions for the given instance."""
+        return self.db.instance_get_actions(context, instance_id)
+
+    def suspend(self, context, instance_id):
+        """suspend the instance with instance_id"""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "suspend_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def resume(self, context, instance_id):
+        """resume the instance with instance_id"""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "resume_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def rescue(self, context, instance_id):
+        """Rescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "rescue_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def unrescue(self, context, instance_id):
+        """Unrescue the given instance."""
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "unrescue_instance",
+                  "args": {"instance_id": instance_id}})
+
+    def attach_volume(self, context, instance_id, volume_id, device):
+        if not re.match("^/dev/[a-z]d[a-z]+$", device):
+            raise exception.ApiError(_("Invalid device specified: %s. "
+                                       "Example device: /dev/vdb") % device)
+        self.volume_api.check_attach(context, volume_id)
+        instance = self.get(context, instance_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "attach_volume",
+                  "args": {"volume_id": volume_id,
+                           "instance_id": instance_id,
+                           "mountpoint": device}})
+
+    def detach_volume(self, context, volume_id):
+        instance = self.db.volume_get_instance(context.elevated(), volume_id)
+        if not instance:
+            raise exception.ApiError(_("Volume isn't attached to anything!"))
+        self.volume_api.check_detach(context, volume_id)
+        host = instance['host']
+        rpc.cast(context,
+                 self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                 {"method": "detach_volume",
+                  "args": {"instance_id": instance['id'],
+                           "volume_id": volume_id}})
+        return instance
+
+    def associate_floating_ip(self, context, instance_id, address):
+        instance = self.get(context, instance_id)
+        self.network_api.associate_floating_ip(context, address,
+                                               instance['fixed_ip'])
-- cgit

From def5583469bd265c9107ed54d461441bc6303151 Mon Sep 17 00:00:00 2001
From: Eric Day
Date: Wed, 5 Jan 2011 09:50:19 -0800
Subject: Split internal API get calls to get and get_all, where the former
 takes an ID and returns one resource, and the latter can optionally take a
 filter and return a list of resources.
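
The caller-facing effect of the split is easiest to see in a small
example. The sketch below is illustrative only: FakeDB, ctxt, and the
instance dicts are stand-ins invented for the example, and only the
get/get_all signatures mirror the real nova/compute/api.py code.

    # Minimal sketch of the new call pattern, with a stubbed db layer.
    class FakeDB(object):
        def instance_get_by_id(self, context, instance_id):
            return {'id': instance_id}

        def instance_get_all_by_project(self, context, project_id):
            return [{'id': 1, 'project_id': project_id}]

    class API(object):
        def __init__(self, db):
            self.db = db

        def get(self, context, instance_id):
            # One ID in, exactly one resource out.
            return self.db.instance_get_by_id(context, instance_id)

        def get_all(self, context, project_id=None):
            # Optional filter in, always a list out.
            if project_id is not None:
                return self.db.instance_get_all_by_project(context,
                                                           project_id)
            return []

    api = API(FakeDB())
    ctxt = None  # a real nova RequestContext would go here
    print(api.get(ctxt, 42))                     # {'id': 42}
    print(api.get_all(ctxt, project_id='demo'))  # a list of dicts

Previously one get() did both jobs depending on which keyword arguments
were set, so callers could not tell from the call site whether they were
getting a dict or a list; after the split, the return type is fixed by
the method name.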
---
 nova/compute/api.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 74d030c4d..64d47b1ce 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -245,13 +245,15 @@ class API(base.Base):
         else:
             self.db.instance_destroy(context, instance_id)
 
-    def get(self, context, instance_id=None, project_id=None,
-            reservation_id=None, fixed_ip=None):
-        """Get one or more instances, possibly filtered by one of the
+    def get(self, context, instance_id):
+        """Get a single instance with the given ID."""
+        return self.db.instance_get_by_id(context, instance_id)
+
+    def get_all(self, context, project_id=None, reservation_id=None,
+                fixed_ip=None):
+        """Get all instances, possibly filtered by one of the
         given parameters. If there is no filter and the context is
         an admin, it will retrieve all instances in the system."""
-        if instance_id is not None:
-            return self.db.instance_get_by_id(context, instance_id)
         if reservation_id is not None:
             return self.db.instance_get_all_by_reservation(context,
                                                            reservation_id)
-- cgit

From 006a3c43093ce3324173e0aed172a3be1396d5dc Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Thu, 6 Jan 2011 16:21:59 -0600
Subject: altered argument handling: have checks_instance_lock take self,
 context, and instance_id as explicit parameters instead of digging them out
 of *args and **kwargs
---
 nova/compute/manager.py | 44 +++++++++++++++-----------------------------
 1 file changed, 15 insertions(+), 29 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 2671d401c..a5343ff22 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -62,39 +62,25 @@ def checks_instance_lock(function):
     """
 
     @functools.wraps(function)
-    def decorated_function(*args, **kwargs):
-
-        logging.info(_("check_instance_locks decorating |%s|"), function)
-        logging.info(_("check_instance_locks: arguments: |%s| |%s|"), args,
-                     kwargs)
-        # assume worst case (have to declare so they are in scope)
-        admin = False
-        locked = True
-        instance_id = False
-
-        # grab args to function
-        try:
-            if 'context' in kwargs:
-                context = kwargs['context']
-            else:
-                context = args[1]
-            if 'instance_id' in kwargs:
-                instance_id = kwargs['instance_id']
-            else:
-                instance_id = args[2]
-            locked = args[0].get_lock(context, instance_id)
-            admin = context.is_admin
-        except Exception as e:
-            logging.error(_("check_instance_lock fail! args: |%s| |%s|"), args,
args: |%s| |%s|"), args, - kwargs) - raise e + def decorated_function(self, context, instance_id, *args, **kwargs): + + logging.info(_("check_instance_lock: decorating: |%s|"), function) + logging.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), + self, + context, + instance_id) + unlocked = not self.get_lock(context, instance_id) + admin = context.is_admin + logging.info(_("check_instance_lock: locked: |%s|"), locked) + logging.info(_("check_instance_lock: admin: |%s|"), admin) # if admin or unlocked call function otherwise log error - if admin or not locked: + if admin or unlocked: + logging.info(_("check_instance_lock: executing: |%s|"), function) function(*args, **kwargs) else: - logging.error(_("Instance |%s| is locked, cannot execute |%s|"), - instance_id, function) + logging.error(_("check_instance_lock: not executing |%s|"), + function) return False return decorated_function -- cgit From 11d6e9d2f917d124946d0fa47c1512a1f8ab940d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 17:02:20 -0600 Subject: accidentally left unlocked in there, it should have been locked --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 1ed1eb40a..3c20b7c73 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -69,13 +69,13 @@ def checks_instance_lock(function): self, context, instance_id) - unlocked = not self.get_lock(context, instance_id) + locked = self.get_lock(context, instance_id) admin = context.is_admin logging.info(_("check_instance_lock: locked: |%s|"), locked) logging.info(_("check_instance_lock: admin: |%s|"), admin) # if admin or unlocked call function otherwise log error - if admin or unlocked: + if admin or not locked: logging.info(_("check_instance_lock: executing: |%s|"), function) function(*args, **kwargs) else: -- cgit From 7450f84e0f491f8a24273135432e105677c4a589 Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 17:23:00 -0600 Subject: passing the correct parameters to decorated function --- nova/compute/manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3c20b7c73..b7ec2e93b 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -77,7 +77,7 @@ def checks_instance_lock(function): # if admin or unlocked call function otherwise log error if admin or not locked: logging.info(_("check_instance_lock: executing: |%s|"), function) - function(*args, **kwargs) + function(self, context, isntance_id, *args, **kwargs) else: logging.error(_("check_instance_lock: not executing |%s|"), function) -- cgit From ebec92778bdaf4af58029f9977697865c53f881d Mon Sep 17 00:00:00 2001 From: Trey Morris Date: Thu, 6 Jan 2011 17:25:17 -0600 Subject: refers to instance_id instead of instance_ref[instance_id] --- nova/compute/manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/compute') diff --git a/nova/compute/manager.py b/nova/compute/manager.py index b7ec2e93b..24c321130 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -411,7 +411,7 @@ class ComputeManager(manager.Manager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - logging.debug(_('instance %s: locking'), instance_ref['internal_id']) + logging.debug(_('instance %s: locking'), instance_id) self.db.instance_update(context, instance_id, {'locked': True}) 
     @exception.wrap_exception
@@ -423,7 +423,7 @@ class ComputeManager(manager.Manager):
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
 
-        logging.debug(_('instance %s: unlocking'), instance_ref['internal_id'])
+        logging.debug(_('instance %s: unlocking'), instance_id)
         self.db.instance_update(context, instance_id, {'locked': False})
 
     @exception.wrap_exception
-- cgit

From 76e3923c40dff2f754b045847d8ad19ea9a7cef1 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Thu, 6 Jan 2011 17:27:57 -0600
Subject: typo: isntance_id -> instance_id
---
 nova/compute/manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'nova/compute')

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 24c321130..d5136eb26 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -77,7 +77,7 @@ def checks_instance_lock(function):
         # if admin or unlocked call function otherwise log error
         if admin or not locked:
             logging.info(_("check_instance_lock: executing: |%s|"), function)
-            function(self, context, isntance_id, *args, **kwargs)
+            function(self, context, instance_id, *args, **kwargs)
         else:
             logging.error(_("check_instance_lock: not executing |%s|"),
                           function)
-- cgit

From 19ffc1275814a6c00f6ff19dd0c03060143d097a Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Fri, 7 Jan 2011 12:08:22 +0100
Subject: Remove redundant import of nova.context. Use db instance attribute
 rather than module directly.
---
 nova/compute/api.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'nova/compute')

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 2c2937f48..0d04d344c 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -24,7 +24,6 @@ import datetime
 import logging
 import time
 
-from nova import context
 from nova import db
 from nova import exception
 from nova import flags
@@ -210,7 +209,7 @@ class API(base.Base):
     def trigger_security_group_rules_refresh(self, context, security_group_id):
         """Called when a rule is added to or removed from a security_group"""
 
-        security_group = db.security_group_get(context, security_group_id)
+        security_group = self.db.security_group_get(context, security_group_id)
 
         hosts = set()
         for instance in security_group['instances']:
@@ -232,7 +231,8 @@ class API(base.Base):
         # First, we get the security group rules that reference this group as
         # the grantee..
         security_group_rules = \
-            db.security_group_rule_get_by_security_group_grantee(context,
+            self.db.security_group_rule_get_by_security_group_grantee(
+                context,
                 group_id)
 
         # ..then we distill the security groups to which they belong..
-- cgit
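
After the four manager.py fixes above land, checks_instance_lock settles
into a stable shape. What follows is a condensed, self-contained sketch
of that end state, not the verbatim nova code: FakeContext, FakeManager,
and the _locks dict are invented stand-ins, and the logging calls are
trimmed down.

    import functools
    import logging

    def checks_instance_lock(function):
        """Run the wrapped method only if the instance is unlocked or
        the caller is an admin; otherwise log the refusal and bail."""
        @functools.wraps(function)
        def decorated_function(self, context, instance_id, *args, **kwargs):
            locked = self.get_lock(context, instance_id)
            if context.is_admin or not locked:
                return function(self, context, instance_id, *args, **kwargs)
            logging.error("check_instance_lock: not executing %s",
                          function.__name__)
            return False
        return decorated_function

    class FakeContext(object):       # stand-in for nova's RequestContext
        is_admin = False

    class FakeManager(object):       # stand-in for ComputeManager
        def __init__(self):
            self._locks = {1: True, 2: False}

        def get_lock(self, context, instance_id):
            return self._locks.get(instance_id, False)

        @checks_instance_lock
        def reboot_instance(self, context, instance_id):
            return 'rebooted %s' % instance_id

    manager = FakeManager()
    ctxt = FakeContext()
    print(manager.reboot_instance(ctxt, 1))  # instance 1 is locked: False
    print(manager.reboot_instance(ctxt, 2))  # unlocked: 'rebooted 2'

The explicit (self, context, instance_id) signature is what made the
last three fixes small: once the decorator stops guessing at argument
positions inside *args and **kwargs, passing the same triple straight
through to the wrapped function is the only bookkeeping left.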