author     Mark McLoughlin <markmc@redhat.com>  2012-07-26 14:16:24 +0100
committer  Mark McLoughlin <markmc@redhat.com>  2012-07-26 16:53:17 +0100
commit     ac180dc10aa0ee7eb5e288f4d51cfe46bff33b2d (patch)
tree       65f2450993bc88a70fef3708c1cf38fea86ecf05
parent     1dab66327368dab8271ee68db0c0bbb97c8d0555 (diff)
Remove return values from some compute RPC methods
The checks_instance_lock decorator currently discards any return value
from the functions it wraps. Luckily, none of the callers of these
functions check for a return value. Since the return values are unused,
just remove them. Fixing the decorator to return them would effectively
be a change to the RPC API.

And since we know the return value is unused, we also know that nothing
checks for the False return from checks_instance_lock() and that too
can be removed.

Change-Id: I8b49107dba51caf52665341e4977de179b6404f6
-rw-r--r--  nova/compute/manager.py             19
-rw-r--r--  nova/tests/compute/test_compute.py  19
2 files changed, 22 insertions(+), 16 deletions(-)
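
The core of the change is the decorator behaviour the commit message
describes: checks_instance_lock calls the function it wraps but never
passes the result back, so every return value in these methods was
already dead code. A minimal sketch of that pattern, assuming a
simplified lock check (self.locked stands in for the real
self._get_lock() lookup; this is illustrative, not nova's exact code):

import functools


def checks_instance_lock(function):
    """Sketch of a lock-checking decorator whose wrapper never returns
    the wrapped function's result, so callers always see None."""

    @functools.wraps(function)
    def decorated_function(self, context, instance_uuid, *args, **kwargs):
        # 'self.locked' is a stand-in for the real self._get_lock() lookup.
        if context.is_admin or not self.locked:
            function(self, context, instance_uuid, *args, **kwargs)
            # Note: no 'return' here, so any value the function produced
            # is discarded before it ever reaches the caller.
        # After this commit the locked branch returns nothing as well;
        # the caller sees None whether or not the method actually ran.

    return decorated_function

Making decorated_function return function(...) would start forwarding
results again, which is exactly the RPC API change the commit message
says it wants to avoid.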
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 836d33452..b0994b5e0 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -188,7 +188,6 @@ def checks_instance_lock(function):
         else:
             LOG.error(_("check_instance_lock: not executing |%s|"),
                       function, context=context, instance_uuid=instance_uuid)
-            return False
 
     @functools.wraps(function)
     def decorated_function(self, context, instance_uuid, *args, **kwargs):
@@ -1684,8 +1683,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                    instance,
                                                    network_id)
-        network_info = self.inject_network_info(context,
-                                                instance['uuid'])
+        network_info = self._inject_network_info(context, instance['uuid'])
         self.reset_network(context, instance['uuid'])
 
         self._notify_about_instance_usage(
@@ -1707,8 +1705,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                         instance_ref,
                                                         address)
-        network_info = self.inject_network_info(context,
-                                                instance_ref['uuid'])
+        network_info = self._inject_network_info(context, instance_ref['uuid'])
         self.reset_network(context, instance_ref['uuid'])
 
         self._notify_about_instance_usage(
@@ -1872,9 +1869,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.debug(_('Reset network'), context=context, instance=instance)
         self.driver.reset_network(instance)
 
-    @checks_instance_lock
-    @wrap_instance_fault
-    def inject_network_info(self, context, instance_uuid):
+    def _inject_network_info(self, context, instance_uuid):
         """Inject network info for the given instance."""
         instance = self.db.instance_get_by_uuid(context, instance_uuid)
         LOG.debug(_('Inject network info'), context=context, instance=instance)
@@ -1887,6 +1882,12 @@ class ComputeManager(manager.SchedulerDependentManager):
                                          self._legacy_nw_info(network_info))
         return network_info
 
+    @checks_instance_lock
+    @wrap_instance_fault
+    def inject_network_info(self, context, instance_uuid):
+        """Inject network info, but don't return the info."""
+        self._inject_network_info(context, instance_uuid)
+
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @wrap_instance_fault
     def get_console_output(self, context, instance=None, instance_uuid=None,
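
The two hunks above implement the split: the value-returning logic
moves to a private _inject_network_info helper for the in-process
callers (add_fixed_ip_to_instance, remove_fixed_ip_from_instance),
while the decorated RPC entry point keeps its name but deliberately
drops the result. A condensed, self-contained sketch of the resulting
shape, with no-op placeholders standing in for the two decorators:

def checks_instance_lock(function):  # placeholder for nova's decorator
    return function


def wrap_instance_fault(function):  # placeholder for nova's fault wrapper
    return function


class ComputeManager:
    def _inject_network_info(self, context, instance_uuid):
        """Private helper: does the work and returns network_info for
        in-process callers that still need the value."""
        network_info = {'fake': 'nw_info'}  # stands in for the real work
        return network_info

    @checks_instance_lock
    @wrap_instance_fault
    def inject_network_info(self, context, instance_uuid):
        """RPC entry point: same work, return value deliberately dropped."""
        self._inject_network_info(context, instance_uuid)


manager = ComputeManager()
assert manager.inject_network_info(None, 'fake-uuid') is None
assert manager._inject_network_info(None, 'fake-uuid') == {'fake': 'nw_info'}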
@@ -2013,7 +2014,6 @@ class ComputeManager(manager.SchedulerDependentManager):
                   'volume_size': None,
                   'no_device': None}
         self.db.block_device_mapping_create(context, values)
-        return True
 
     def _detach_volume(self, context, instance, bdm):
         """Do the actual driver detach using block device mapping."""
@@ -2046,7 +2046,6 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.volume_api.detach(context.elevated(), volume)
         self.db.block_device_mapping_destroy_by_instance_and_volume(
                 context, instance_uuid, volume_id)
-        return True
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def remove_volume_connection(self, context, instance_id, volume_id):
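
Why returning a value "would effectively be a change to the RPC API":
these manager methods are invoked over the message bus, and, assuming
the usual one-way cast dispatch (an assumption, not spelled out in this
commit), there is no reply channel for a result to travel back on. A
hypothetical sketch of the distinction:

def cast(topic, method, **kwargs):
    """Stand-in for a one-way RPC cast: the message is sent, the caller
    gets no reply, and any server-side return value simply evaporates."""
    print('sent %s to %s (no reply expected)' % (method, topic))


def call(topic, method, **kwargs):
    """Stand-in for a request/response RPC call: the only style that
    would surface a return value to the caller."""
    raise NotImplementedError('not how these compute methods are invoked')


# With cast-style dispatch, None vs. True vs. False on the server side
# is unobservable, which is what makes this cleanup safe:
cast('compute.host1', 'attach_volume', instance_uuid='fake-uuid')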
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index e2c736c54..030dab490 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1041,17 +1041,24 @@ class ComputeTestCase(BaseTestCase):
                                                   None,
                                                   is_admin=False)
 
+        def check_task_state(task_state):
+            instance = db.instance_get_by_uuid(self.context, instance_uuid)
+            self.assertEqual(instance['task_state'], task_state)
+
+        db.instance_update(self.context, instance_uuid,
+                           {'task_state': task_states.REBOOTING})
+
-        # decorator should return False (fail) with locked nonadmin context
+        # should fail with locked nonadmin context, task_state won't be cleared
         self.compute_api.lock(self.context, instance)
-        ret_val = self.compute.reboot_instance(non_admin_context,
+        self.compute.reboot_instance(non_admin_context,
                 instance=jsonutils.to_primitive(instance))
-        self.assertEqual(ret_val, False)
+        check_task_state(task_states.REBOOTING)
 
-        # decorator should return None (success) with unlocked nonadmin context
+        # should succeed with unlocked nonadmin context, task_state cleared
         self.compute_api.unlock(self.context, instance)
-        ret_val = self.compute.reboot_instance(non_admin_context,
+        self.compute.reboot_instance(non_admin_context,
                 instance=jsonutils.to_primitive(instance))
-        self.assertEqual(ret_val, None)
+        check_task_state(None)
 
         self.compute.terminate_instance(self.context, instance_uuid)
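
With no return value left to assert on, the reworked test observes the
decorator through its side effect instead: task_state is set to
REBOOTING up front, and whether reboot_instance cleared it tells the
test if the reboot actually ran. A self-contained toy model of that
check (every name here is illustrative, not a real test fixture):

import functools

REBOOTING = 'rebooting'


def checks_instance_lock(function):
    """Toy decorator: skip the call for locked, non-admin requests, and
    (as of this commit) return nothing in either branch."""
    @functools.wraps(function)
    def inner(self, context):
        if context['is_admin'] or not self.locked:
            function(self, context)
    return inner


class FakeCompute:
    def __init__(self):
        self.locked = True
        self.task_state = REBOOTING  # set before the reboot attempt

    @checks_instance_lock
    def reboot_instance(self, context):
        self.task_state = None  # a completed reboot clears the task state


compute = FakeCompute()
non_admin = {'is_admin': False}

compute.reboot_instance(non_admin)       # locked: silently skipped...
assert compute.task_state == REBOOTING   # ...so the task_state survives

compute.locked = False
compute.reboot_instance(non_admin)       # unlocked: the reboot runs...
assert compute.task_state is None        # ...and the task_state is cleared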