diff options
| author | Russell Bryant <rbryant@redhat.com> | 2012-08-06 19:03:31 -0400 |
|---|---|---|
| committer | Russell Bryant <rbryant@redhat.com> | 2012-08-07 10:18:58 -0400 |
| commit | 0bfe1fd5639d1e1bb62bc8de1d868efa6214d922 (patch) | |
| tree | 9e4577014bff9bef81d0fb29fa378c68d09ea9ba | |
| parent | 24cfd0d6f2667d65f81b43fc76af71fd33b78de7 (diff) | |
Remove temporary hack from checks_instance_lock.
The checks_instance_lock decorator temporarily became a mess while
the compute manager was being converted over to taking an instance
instead of instance_uuid as a parameter. Now that all methods that use
this decorator have been converted, the code can be simplified.
The _get_lock() method was updated to only look up an instance from the
db if necessary. One of the unit tests had to be updated to account for
this. These changes illustrate a good point about all of these
no-db-messaging patches. Aside from silly typos, the bugs most
likely to arise from these changes stem from code looking at stale
instance state since the state is being pulled out of the database less
often. Hopefully I've caught everything, but it was worth pointing out
as something others should be aware of.
Part of blueprint no-db-messaging.
Change-Id: I02e6a5f2354f2e9ae8ea403b43359eb1bc8bae27
| -rw-r--r-- | nova/compute/manager.py | 59 | ||||
| -rw-r--r-- | nova/tests/compute/test_compute.py | 6 |
2 files changed, 21 insertions, 44 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d8c021a52..2f9d6066e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -157,17 +157,18 @@ def publisher_id(host=None): def checks_instance_lock(function): """Decorator to prevent action against locked instances for non-admins.""" - # NOTE(russellb): There are two versions of the checks_instance_lock - # decorator. This function is the core code for it. This just serves - # as a transition from when every function expected a context - # and instance_uuid as positional arguments to where everything is a kwarg, - # and the function may get either an instance_uuid or an instance. - def _checks_instance_lock_core(self, cb, context, *args, **kwargs): - instance_uuid = kwargs['instance_uuid'] + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + instance = kwargs.get('instance', None) + if instance: + instance_uuid = instance['uuid'] + else: + instance_uuid = kwargs['instance_uuid'] + if context.instance_lock_checked: locked = False # Implied, since we wouldn't be here otherwise else: - locked = self._get_lock(context, instance_uuid) + locked = self._get_lock(context, instance_uuid, instance) admin = context.is_admin LOG.info(_("check_instance_lock: locked: |%s|"), locked, @@ -177,39 +178,12 @@ def checks_instance_lock(function): # if admin or unlocked call function otherwise log error if admin or not locked: - cb(self, context, *args, **kwargs) + function(self, context, *args, **kwargs) else: LOG.error(_("check_instance_lock: not executing |%s|"), function, context=context, instance_uuid=instance_uuid) - @functools.wraps(function) - def decorated_function(self, context, instance_uuid, *args, **kwargs): - - def _cb(self, context, *args, **kwargs): - instance_uuid = kwargs.pop('instance_uuid') - function(self, context, instance_uuid, *args, **kwargs) - - kwargs['instance_uuid'] = instance_uuid - - return _checks_instance_lock_core(self, _cb, context, 
*args, **kwargs) - - @functools.wraps(function) - def decorated_function_new(self, context, *args, **kwargs): - if 'instance_uuid' not in kwargs: - kwargs['instance_uuid'] = kwargs['instance']['uuid'] - - def _cb(self, context, *args, **kwargs): - function(self, context, *args, **kwargs) - - return _checks_instance_lock_core(self, _cb, context, *args, **kwargs) - - expected_args = ['context', 'instance_uuid'] - argspec = inspect.getargspec(function) - - if expected_args == argspec.args[1:len(expected_args) + 1]: - return decorated_function - else: - return decorated_function_new + return decorated_function def wrap_instance_fault(function): @@ -1834,14 +1808,15 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @wrap_instance_fault - def _get_lock(self, context, instance_uuid): + def _get_lock(self, context, instance_uuid=None, instance=None): """Return the boolean state of the given instance's lock.""" - context = context.elevated() - instance_ref = self.db.instance_get_by_uuid(context, instance_uuid) + if not instance: + context = context.elevated() + instance = self.db.instance_get_by_uuid(context, instance_uuid) LOG.debug(_('Getting locked state'), context=context, - instance=instance_ref) - return instance_ref['locked'] + instance=instance) + return instance['locked'] @checks_instance_lock @wrap_instance_fault diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 0b0316e26..bc60233dd 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -1103,17 +1103,19 @@ class ComputeTestCase(BaseTestCase): instance = db.instance_get_by_uuid(self.context, instance_uuid) self.assertEqual(instance['task_state'], task_state) - db.instance_update(self.context, instance_uuid, - {'task_state': task_states.REBOOTING}) + instance = db.instance_update(self.context, instance_uuid, + {'task_state': task_states.REBOOTING}) # 
should fail with locked nonadmin context, task_state won't be cleared self.compute_api.lock(self.context, instance) + instance = db.instance_get_by_uuid(self.context, instance_uuid) self.compute.reboot_instance(non_admin_context, instance=jsonutils.to_primitive(instance)) check_task_state(task_states.REBOOTING) # should succeed with unlocked nonadmin context, task_state cleared self.compute_api.unlock(self.context, instance) + instance = db.instance_get_by_uuid(self.context, instance_uuid) self.compute.reboot_instance(non_admin_context, instance=jsonutils.to_primitive(instance)) check_task_state(None) |
