| author | Sandy Walsh <sandy.walsh@rackspace.com> | 2011-07-13 18:36:47 +0000 |
|---|---|---|
| committer | Tarmac <> | 2011-07-13 18:36:47 +0000 |
| commit | 326074c903e8ec03b28154ea2547cf1dd00cdbac (patch) | |
| tree | 7419885689c0f68363742c27b82ac4a917ec0c41 | |
| parent | b58e853038e9c322be765600e225568689e5c479 (diff) | |
| parent | db13df67f895c8e9b20e9faf4c488035c9f43e94 (diff) | |
| download | nova-326074c903e8ec03b28154ea2547cf1dd00cdbac.tar.gz nova-326074c903e8ec03b28154ea2547cf1dd00cdbac.tar.xz nova-326074c903e8ec03b28154ea2547cf1dd00cdbac.zip | |
Extends the exception.wrap_exception decorator to optionally send an update to the notification system in the event of a failure.
| -rw-r--r-- | nova/compute/manager.py | 118 |
| -rw-r--r-- | nova/console/manager.py | 4 |
| -rw-r--r-- | nova/console/vmrc_manager.py | 4 |
| -rw-r--r-- | nova/exception.py | 59 |
| -rw-r--r-- | nova/notifier/api.py | 14 |
| -rw-r--r-- | nova/rpc.py | 2 |
| -rw-r--r-- | nova/scheduler/driver.py | 1 |
| -rw-r--r-- | nova/tests/test_exception.py | 63 |
| -rw-r--r-- | nova/virt/libvirt/connection.py | 30 |
9 files changed, 202 insertions, 93 deletions
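As a rough usage sketch (not part of the commit itself): after this change the decorator must be applied with a call, and call sites that want failure notifications pass in a notifier module and publisher id, mirroring the compute manager changes in the diff below. The service module and function names here are hypothetical.

```python
# Minimal sketch, assuming it lives inside the nova tree (Python 2 era).
from nova import exception
from nova.notifier import api as notifier


def publisher_id(host=None):
    # Mirrors the helper added to nova/compute/manager.py.
    return notifier.publisher_id("compute", host)


# Old style (no longer valid once wrap_exception takes arguments):
#     @exception.wrap_exception
# New style -- bare wrapping, log only:
@exception.wrap_exception()
def reboot_instance(context, instance_id):
    pass


# New style -- also emit an ERROR notification on failure; the event type
# defaults to the wrapped function's name ('run_instance' here).
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def run_instance(context, instance_id, **kwargs):
    pass
```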
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c627d2985..960dfea54 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -54,7 +54,7 @@ from nova import rpc
 from nova import utils
 from nova import volume
 from nova.compute import power_state
-from nova.notifier import api as notifier_api
+from nova.notifier import api as notifier
 from nova.compute.utils import terminate_volumes
 from nova.virt import driver
@@ -85,6 +85,10 @@ flags.DEFINE_integer('host_state_interval', 120,
 LOG = logging.getLogger('nova.compute.manager')
 
 
+def publisher_id(host=None):
+    return notifier.publisher_id("compute", host)
+
+
 def checks_instance_lock(function):
     """Decorator to prevent action against locked instances for non-admins."""
     @functools.wraps(function)
@@ -183,7 +187,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     def get_console_pool_info(self, context, console_type):
         return self.driver.get_console_pool_info(console_type)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def refresh_security_group_rules(self, context, security_group_id,
                                      **kwargs):
         """Tell the virtualization driver to refresh security group rules.
@@ -193,7 +197,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         return self.driver.refresh_security_group_rules(security_group_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def refresh_security_group_members(self, context, security_group_id,
                                        **kwargs):
         """Tell the virtualization driver to refresh security group members.
@@ -203,7 +207,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         return self.driver.refresh_security_group_members(security_group_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def refresh_provider_fw_rules(self, context, **_kwargs):
         """This call passes straight through to the virtualization driver."""
         return self.driver.refresh_provider_fw_rules()
@@ -318,10 +322,9 @@ class ComputeManager(manager.SchedulerDependentManager):
             self._update_launched_at(context, instance_id)
             self._update_state(context, instance_id)
             usage_info = utils.usage_from_instance(instance)
-            notifier_api.notify('compute.%s' % self.host,
-                                'compute.instance.create',
-                                notifier_api.INFO,
-                                usage_info)
+            notifier.notify('compute.%s' % self.host,
+                            'compute.instance.create',
+                            notifier.INFO, usage_info)
         except exception.InstanceNotFound:
             # FIXME(wwolf): We are just ignoring InstanceNotFound
             # exceptions here in case the instance was immediately
             # be fixed once we have no-db-messaging
             pass
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def run_instance(self, context, instance_id, **kwargs):
         self._run_instance(context, instance_id, **kwargs)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def start_instance(self, context, instance_id):
         """Starting an instance on this host."""
@@ -366,7 +369,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         if action_str == 'Terminating':
             terminate_volumes(self.db, context, instance_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def terminate_instance(self, context, instance_id):
         """Terminate an instance on this host."""
@@ -376,19 +379,18 @@ class ComputeManager(manager.SchedulerDependentManager):
         # TODO(ja): should we keep it in a terminated state for a bit?
         self.db.instance_destroy(context, instance_id)
         usage_info = utils.usage_from_instance(instance)
-        notifier_api.notify('compute.%s' % self.host,
-                            'compute.instance.delete',
-                            notifier_api.INFO,
-                            usage_info)
+        notifier.notify('compute.%s' % self.host,
+                        'compute.instance.delete',
+                        notifier.INFO, usage_info)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def stop_instance(self, context, instance_id):
         """Stopping an instance on this host."""
         self._shutdown_instance(context, instance_id, 'Stopping')
         # instance state will be updated to stopped by _poll_instance_states()
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def rebuild_instance(self, context, instance_id, **kwargs):
         """Destroy and re-make this instance.
@@ -418,12 +420,12 @@ class ComputeManager(manager.SchedulerDependentManager):
         self._update_state(context, instance_id)
         usage_info = utils.usage_from_instance(instance_ref,
                 image_ref=image_ref)
-        notifier_api.notify('compute.%s' % self.host,
+        notifier.notify('compute.%s' % self.host,
                             'compute.instance.rebuild',
-                            notifier_api.INFO,
+                            notifier.INFO,
                             usage_info)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def reboot_instance(self, context, instance_id):
         """Reboot an instance on this host."""
@@ -448,7 +450,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.driver.reboot(instance_ref)
         self._update_state(context, instance_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def snapshot_instance(self, context, instance_id, image_id,
                           image_type='snapshot', backup_type=None,
                           rotation=None):
@@ -540,7 +542,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.debug(_("Deleting image %d" % image_id))
         image_service.delete(context, image_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def set_admin_password(self, context, instance_id, new_pass=None):
         """Set the root/admin password for an instance on this host.
@@ -588,7 +590,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 time.sleep(1)
                 continue
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def inject_file(self, context, instance_id, path, file_contents):
         """Write a file to the specified path in an instance on this host."""
@@ -606,7 +608,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.audit(msg)
         self.driver.inject_file(instance_ref, path, file_contents)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def agent_update(self, context, instance_id, url, md5hash):
         """Update agent running on an instance on this host."""
@@ -624,7 +626,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.audit(msg)
         self.driver.agent_update(instance_ref, url, md5hash)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def rescue_instance(self, context, instance_id):
         """Rescue an instance on this host."""
@@ -641,7 +643,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.driver.rescue(instance_ref, _update_state)
         self._update_state(context, instance_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def unrescue_instance(self, context, instance_id):
         """Rescue an instance on this host."""
@@ -662,7 +664,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """Update instance state when async task completes."""
         self._update_state(context, instance_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def confirm_resize(self, context, instance_id, migration_id):
         """Destroys the source instance."""
@@ -670,12 +672,12 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance_ref = self.db.instance_get(context, instance_id)
         self.driver.destroy(instance_ref)
         usage_info = utils.usage_from_instance(instance_ref)
-        notifier_api.notify('compute.%s' % self.host,
+        notifier.notify('compute.%s' % self.host,
                             'compute.instance.resize.confirm',
-                            notifier_api.INFO,
+                            notifier.INFO,
                             usage_info)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def revert_resize(self, context, instance_id, migration_id):
         """Destroys the new instance on the destination machine.
@@ -697,7 +699,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                        'instance_id': instance_id, },
                  })
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def finish_revert_resize(self, context, instance_id, migration_id):
         """Finishes the second half of reverting a resize.
@@ -722,12 +724,12 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.db.migration_update(context, migration_id,
                 {'status': 'reverted'})
         usage_info = utils.usage_from_instance(instance_ref)
-        notifier_api.notify('compute.%s' % self.host,
+        notifier.notify('compute.%s' % self.host,
                             'compute.instance.resize.revert',
-                            notifier_api.INFO,
+                            notifier.INFO,
                             usage_info)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def prep_resize(self, context, instance_id, flavor_id):
         """Initiates the process of moving a running instance to another host.
@@ -765,12 +767,12 @@ class ComputeManager(manager.SchedulerDependentManager):
         usage_info = utils.usage_from_instance(instance_ref,
                               new_instance_type=instance_type['name'],
                               new_instance_type_id=instance_type['id'])
-        notifier_api.notify('compute.%s' % self.host,
+        notifier.notify('compute.%s' % self.host,
                             'compute.instance.resize.prep',
-                            notifier_api.INFO,
+                            notifier.INFO,
                             usage_info)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def resize_instance(self, context, instance_id, migration_id):
         """Starts the migration of a running instance to another host."""
@@ -796,7 +798,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                        'instance_id': instance_id,
                        'disk_info': disk_info}})
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def finish_resize(self, context, instance_id, migration_id, disk_info):
         """Completes the migration process.
@@ -828,7 +830,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.db.migration_update(context, migration_id,
                 {'status': 'finished', })
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def add_fixed_ip_to_instance(self, context, instance_id, network_id):
         """Calls network_api to add new fixed_ip to instance
@@ -840,7 +842,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.inject_network_info(context, instance_id)
         self.reset_network(context, instance_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def remove_fixed_ip_from_instance(self, context, instance_id, address):
         """Calls network_api to remove existing fixed_ip from instance
@@ -852,7 +854,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.inject_network_info(context, instance_id)
         self.reset_network(context, instance_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def pause_instance(self, context, instance_id):
         """Pause an instance on this host."""
@@ -869,7 +871,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                   instance_id,
                                                   result))
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def unpause_instance(self, context, instance_id):
         """Unpause a paused instance on this host."""
@@ -886,13 +888,13 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                   instance_id,
                                                   result))
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def set_host_enabled(self, context, instance_id=None, host=None,
             enabled=None):
         """Sets the specified host's ability to accept new instances."""
         return self.driver.set_host_enabled(host, enabled)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def get_diagnostics(self, context, instance_id):
         """Retrieve diagnostics for an instance on this host."""
         instance_ref = self.db.instance_get(context, instance_id)
@@ -901,7 +903,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                   context=context)
         return self.driver.get_diagnostics(instance_ref)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def suspend_instance(self, context, instance_id):
         """Suspend the given instance."""
@@ -917,7 +919,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                   instance_id,
                                                   result))
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def resume_instance(self, context, instance_id):
         """Resume the given suspended instance."""
@@ -933,7 +935,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                                   instance_id,
                                                   result))
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def lock_instance(self, context, instance_id):
         """Lock the given instance."""
         context = context.elevated()
@@ -941,7 +943,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.debug(_('instance %s: locking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': True})
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def unlock_instance(self, context, instance_id):
         """Unlock the given instance."""
         context = context.elevated()
@@ -949,7 +951,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
         self.db.instance_update(context, instance_id, {'locked': False})
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def get_lock(self, context, instance_id):
         """Return the boolean state of the given instance's lock."""
         context = context.elevated()
@@ -978,7 +980,7 @@ class ComputeManager(manager.SchedulerDependentManager):
 
         self.driver.inject_network_info(instance, network_info)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def get_console_output(self, context, instance_id):
         """Send the console output for the given instance."""
         context = context.elevated()
@@ -988,7 +990,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         output = self.driver.get_console_output(instance_ref)
         return output.decode('utf-8', 'replace').encode('ascii', 'replace')
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def get_ajax_console(self, context, instance_id):
         """Return connection information for an ajax console."""
         context = context.elevated()
@@ -996,7 +998,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance_ref = self.db.instance_get(context, instance_id)
         return self.driver.get_ajax_console(instance_ref)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def get_vnc_console(self, context, instance_id):
         """Return connection information for a vnc console."""
         context = context.elevated()
@@ -1059,7 +1061,7 @@ class ComputeManager(manager.SchedulerDependentManager):
 
         return True
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
         """Detach a volume from an instance."""
@@ -1094,7 +1096,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         self.volume_manager.remove_compute_volume(context, volume_id)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def compare_cpu(self, context, cpu_info):
         """Checks that the host cpu is compatible with a cpu given by xml.
@@ -1105,7 +1107,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         return self.driver.compare_cpu(cpu_info)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def create_shared_storage_test_file(self, context):
         """Makes tmpfile under FLAGS.instance_path.
@@ -1125,7 +1127,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         os.close(fd)
         return os.path.basename(tmp_file)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def check_shared_storage_test_file(self, context, filename):
         """Confirms existence of the tmpfile under FLAGS.instances_path.
@@ -1137,7 +1139,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         if not os.path.exists(tmp_file):
             raise exception.FileNotFound(file_path=tmp_file)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def cleanup_shared_storage_test_file(self, context, filename):
         """Removes existence of the tmpfile under FLAGS.instances_path.
@@ -1148,7 +1150,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         tmp_file = os.path.join(FLAGS.instances_path, filename)
         os.remove(tmp_file)
 
-    @exception.wrap_exception
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def update_available_resource(self, context):
         """See comments update_resource_info.
diff --git a/nova/console/manager.py b/nova/console/manager.py
index e0db21666..2c823b763 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -56,7 +56,7 @@ class ConsoleProxyManager(manager.Manager):
     def init_host(self):
         self.driver.init_host()
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def add_console(self, context, instance_id, password=None,
                     port=None, **kwargs):
         instance = self.db.instance_get(context, instance_id)
@@ -83,7 +83,7 @@ class ConsoleProxyManager(manager.Manager):
         self.driver.setup_console(context, console)
         return console['id']
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def remove_console(self, context, console_id, **_kwargs):
         try:
             console = self.db.console_get(context, console_id)
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index acecc1075..0b5ce4a49 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -77,7 +77,7 @@ class ConsoleVMRCManager(manager.Manager):
         self.driver.setup_console(context, console)
         return console
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def add_console(self, context, instance_id, password=None,
                     port=None, **kwargs):
         """Adds a console for the instance.
@@ -107,7 +107,7 @@ class ConsoleVMRCManager(manager.Manager):
                                                  instance)
         return console['id']
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def remove_console(self, context, console_id, **_kwargs):
         """Removes a console entry."""
         try:
diff --git a/nova/exception.py b/nova/exception.py
index 988940d6a..ad6c005f8 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -24,8 +24,9 @@ SHOULD include dedicated exception logging.
 
 """
 
-from nova import log as logging
+from functools import wraps
 
+from nova import log as logging
 
 LOG = logging.getLogger('nova.exception')
@@ -81,19 +82,49 @@ def wrap_db_error(f):
     _wrap.func_name = f.func_name
 
 
-def wrap_exception(f):
-    def _wrap(*args, **kw):
-        try:
-            return f(*args, **kw)
-        except Exception, e:
-            if not isinstance(e, Error):
-                #exc_type, exc_value, exc_traceback = sys.exc_info()
-                LOG.exception(_('Uncaught exception'))
-                #logging.error(traceback.extract_stack(exc_traceback))
-                raise Error(str(e))
-            raise
-    _wrap.func_name = f.func_name
-    return _wrap
+def wrap_exception(notifier=None, publisher_id=None, event_type=None,
+                   level=None):
+    """This decorator wraps a method to catch any exceptions that may
+    get thrown. It logs the exception as well as optionally sending
+    it to the notification system.
+    """
+    # TODO(sandy): Find a way to import nova.notifier.api so we don't have
+    # to pass it in as a parameter. Otherwise we get a cyclic import of
+    # nova.notifier.api -> nova.utils -> nova.exception :(
+    def inner(f):
+        def wrapped(*args, **kw):
+            try:
+                return f(*args, **kw)
+            except Exception, e:
+                if notifier:
+                    payload = dict(args=args, exception=e)
+                    payload.update(kw)
+
+                    # Use a temp vars so we don't shadow
+                    # our outer definitions.
+                    temp_level = level
+                    if not temp_level:
+                        temp_level = notifier.ERROR
+
+                    temp_type = event_type
+                    if not temp_type:
+                        # If f has multiple decorators, they must use
+                        # functools.wraps to ensure the name is
+                        # propagated.
+                        temp_type = f.__name__
+
+                    notifier.notify(publisher_id, temp_type, temp_level,
+                                    payload)
+
+                if not isinstance(e, Error):
+                    #exc_type, exc_value, exc_traceback = sys.exc_info()
+                    LOG.exception(_('Uncaught exception'))
+                    #logging.error(traceback.extract_stack(exc_traceback))
+                    raise Error(str(e))
+                raise
+
+        return wraps(f)(wrapped)
+    return inner
 
 
 class NovaException(Exception):
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index d49517c8b..98969fd3e 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -17,7 +17,9 @@ import uuid
 
 from nova import flags
 from nova import utils
+from nova import log as logging
 
+LOG = logging.getLogger('nova.exception')
 
 FLAGS = flags.FLAGS
@@ -37,6 +39,12 @@ class BadPriorityException(Exception):
     pass
 
 
+def publisher_id(service, host=None):
+    if not host:
+        host = FLAGS.host
+    return "%s.%s" % (service, host)
+
+
 def notify(publisher_id, event_type, priority, payload):
     """
     Sends a notification using the specified driver
@@ -79,4 +87,8 @@ def notify(publisher_id, event_type, priority, payload):
             priority=priority,
             payload=payload,
             timestamp=str(utils.utcnow()))
-    driver.notify(msg)
+    try:
+        driver.notify(msg)
+    except Exception, e:
+        LOG.exception(_("Problem '%(e)s' attempting to "
+                        "send to notification system." % locals()))
diff --git a/nova/rpc.py b/nova/rpc.py
index f52f377b0..e2771ca88 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -219,7 +219,7 @@ class AdapterConsumer(Consumer):
 
         return self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def _process_data(self, msg_id, ctxt, method, args):
         """Thread that maigcally looks for a method on the proxy
         object and calls it.
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index d4a30255d..1bfa7740a 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -31,6 +31,7 @@ from nova import rpc
 from nova import utils
 from nova.compute import power_state
 
+
 FLAGS = flags.FLAGS
 flags.DEFINE_integer('service_down_time', 60,
                      'maximum time since last checkin for up service')
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
index 4d3b9cc73..cd74f8871 100644
--- a/nova/tests/test_exception.py
+++ b/nova/tests/test_exception.py
@@ -32,3 +32,66 @@ class ApiErrorTestCase(test.TestCase):
         self.assertEqual(err.__str__(), 'blah code: fake error')
         self.assertEqual(err.code, 'blah code')
         self.assertEqual(err.msg, 'fake error')
+
+
+class FakeNotifier(object):
+    """Acts like the nova.notifier.api module."""
+    ERROR = 88
+
+    def __init__(self):
+        self.provided_publisher = None
+        self.provided_event = None
+        self.provided_priority = None
+        self.provided_payload = None
+
+    def notify(self, publisher, event, priority, payload):
+        self.provided_publisher = publisher
+        self.provided_event = event
+        self.provided_priority = priority
+        self.provided_payload = payload
+
+
+def good_function():
+    return 99
+
+
+def bad_function_error():
+    raise exception.Error()
+
+
+def bad_function_exception():
+    raise Exception()
+
+
+class WrapExceptionTestCase(test.TestCase):
+    def test_wrap_exception_good_return(self):
+        wrapped = exception.wrap_exception()
+        self.assertEquals(99, wrapped(good_function)())
+
+    def test_wrap_exception_throws_error(self):
+        wrapped = exception.wrap_exception()
+        self.assertRaises(exception.Error, wrapped(bad_function_error))
+
+    def test_wrap_exception_throws_exception(self):
+        wrapped = exception.wrap_exception()
+        # Note that Exception is converted to Error ...
+        self.assertRaises(exception.Error, wrapped(bad_function_exception))
+
+    def test_wrap_exception_with_notifier(self):
+        notifier = FakeNotifier()
+        wrapped = exception.wrap_exception(notifier, "publisher", "event",
+                                           "level")
+        self.assertRaises(exception.Error, wrapped(bad_function_exception))
+        self.assertEquals(notifier.provided_publisher, "publisher")
+        self.assertEquals(notifier.provided_event, "event")
+        self.assertEquals(notifier.provided_priority, "level")
+        for key in ['exception', 'args']:
+            self.assertTrue(key in notifier.provided_payload.keys())
+
+    def test_wrap_exception_with_notifier_defaults(self):
+        notifier = FakeNotifier()
+        wrapped = exception.wrap_exception(notifier)
+        self.assertRaises(exception.Error, wrapped(bad_function_exception))
+        self.assertEquals(notifier.provided_publisher, None)
+        self.assertEquals(notifier.provided_event, "bad_function_exception")
+        self.assertEquals(notifier.provided_priority, notifier.ERROR)
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index e912c2bec..977bb7dfe 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -331,7 +331,7 @@ class LibvirtConnection(driver.ComputeDriver):
         if os.path.exists(target):
             shutil.rmtree(target)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def attach_volume(self, instance_name, device_path, mountpoint):
         virt_dom = self._lookup_by_name(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
@@ -375,7 +375,7 @@ class LibvirtConnection(driver.ComputeDriver):
         if doc is not None:
             doc.freeDoc()
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def detach_volume(self, instance_name, mountpoint):
         virt_dom = self._lookup_by_name(instance_name)
         mount_device = mountpoint.rpartition("/")[2]
@@ -384,7 +384,7 @@ class LibvirtConnection(driver.ComputeDriver):
             raise exception.DiskNotFound(location=mount_device)
         virt_dom.detachDevice(xml)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def snapshot(self, instance, image_href):
         """Create snapshot from a running VM instance.
@@ -460,7 +460,7 @@ class LibvirtConnection(driver.ComputeDriver):
         # Clean up
         shutil.rmtree(temp_dir)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def reboot(self, instance):
         """Reboot a virtual machine, given an instance reference.
@@ -501,31 +501,31 @@ class LibvirtConnection(driver.ComputeDriver):
         timer = utils.LoopingCall(_wait_for_reboot)
         return timer.start(interval=0.5, now=True)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def pause(self, instance, callback):
         """Pause VM instance"""
         dom = self._lookup_by_name(instance.name)
         dom.suspend()
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def unpause(self, instance, callback):
         """Unpause paused VM instance"""
         dom = self._lookup_by_name(instance.name)
         dom.resume()
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def suspend(self, instance, callback):
         """Suspend the specified instance"""
         dom = self._lookup_by_name(instance.name)
         dom.managedSave(0)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def resume(self, instance, callback):
         """resume the specified instance"""
         dom = self._lookup_by_name(instance.name)
         dom.create()
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def rescue(self, instance):
         """Loads a VM using rescue images.
@@ -563,7 +563,7 @@ class LibvirtConnection(driver.ComputeDriver):
         timer = utils.LoopingCall(_wait_for_rescue)
         return timer.start(interval=0.5, now=True)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def unrescue(self, instance):
         """Reboot the VM which is being rescued back into primary images.
@@ -573,13 +573,13 @@ class LibvirtConnection(driver.ComputeDriver):
         """
         self.reboot(instance)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def poll_rescued_instances(self, timeout):
         pass
 
     # NOTE(ilyaalekseyev): Implementation like in multinics
     # for xenapi(tr3buchet)
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def spawn(self, instance, network_info=None, block_device_mapping=None):
         xml = self.to_xml(instance, False, network_info=network_info,
                           block_device_mapping=block_device_mapping)
@@ -642,7 +642,7 @@ class LibvirtConnection(driver.ComputeDriver):
         LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
         return contents
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def get_console_output(self, instance):
         console_log = os.path.join(FLAGS.instances_path, instance['name'],
                                    'console.log')
@@ -663,7 +663,7 @@ class LibvirtConnection(driver.ComputeDriver):
 
         return self._dump_file(fpath)
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def get_ajax_console(self, instance):
         def get_open_port():
             start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
@@ -704,7 +704,7 @@ class LibvirtConnection(driver.ComputeDriver):
     def get_host_ip_addr(self):
         return FLAGS.my_ip
 
-    @exception.wrap_exception
+    @exception.wrap_exception()
     def get_vnc_console(self, instance):
         def get_vnc_port_for_instance(instance_name):
             virt_dom = self._lookup_by_name(instance_name)
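A minimal sketch of the failure path introduced in nova/exception.py, following the behaviour exercised by WrapExceptionTestCase above; DummyNotifier, the wrapped function, and the publisher string are illustrative stand-ins, not part of the commit.

```python
# Sketch only: mirrors the semantics of the new wrap_exception decorator.
from nova import exception


class DummyNotifier(object):
    """Stand-in for nova.notifier.api, like the FakeNotifier test fixture."""
    ERROR = 'ERROR'

    def __init__(self):
        self.calls = []

    def notify(self, publisher_id, event_type, priority, payload):
        self.calls.append((publisher_id, event_type, priority, payload))


dummy = DummyNotifier()


@exception.wrap_exception(notifier=dummy, publisher_id='compute.myhost')
def resize_instance(context, instance_id):
    raise ValueError('disk full')


try:
    resize_instance({}, 42)
except exception.Error:
    # The uncaught ValueError is logged, notified, and re-raised as Error.
    pass

publisher, event, priority, payload = dummy.calls[0]
assert publisher == 'compute.myhost'
assert event == 'resize_instance'     # event type defaults to function name
assert priority == dummy.ERROR        # level defaults to notifier.ERROR
assert 'args' in payload and 'exception' in payload
```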
