| author | Brian Waldon <brian.waldon@rackspace.com> | 2011-04-22 18:21:32 +0000 |
|---|---|---|
| committer | Tarmac <> | 2011-04-22 18:21:32 +0000 |
| commit | 7e01d47e887fe96b997ba16013022112d71ea62a | |
| tree | af694b980312da8eb2189c3f4346c9b5f473a562 | |
| parent | f2ed04d5a0034328bb02a1cb81784f8956652052 | |
| parent | 7168812fdf56280f24dc977c5dd9c7a73959c2a2 | |
| download | nova-7e01d47e887fe96b997ba16013022112d71ea62a (tar.gz, tar.xz, zip) | |
Refactoring nova.exception.Invalid usage
| -rw-r--r-- | nova/api/openstack/servers.py | 5 |
| -rw-r--r-- | nova/exception.py | 106 |
| -rw-r--r-- | nova/network/vmwareapi_net.py | 16 |
| -rw-r--r-- | nova/scheduler/driver.py | 28 |
| -rw-r--r-- | nova/tests/test_scheduler.py | 69 |
| -rw-r--r-- | nova/tests/test_virt.py | 2 |
| -rw-r--r-- | nova/virt/libvirt_conn.py | 19 |
| -rw-r--r-- | nova/virt/vmwareapi/vmops.py | 13 |
| -rw-r--r-- | setup.py | 2 |
9 files changed, 158 insertions, 102 deletions
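
The heart of the change is the `NovaException` base class added to `nova/exception.py` in the diff below: each subclass declares a printf-style `message` template, and call sites raise the specific subclass with keyword arguments instead of formatting an error string inline. Here is a minimal standalone sketch of that pattern; the `_()` stub stands in for the gettext hook the real module uses, and the subclass inherits from the base directly for brevity (in the commit it goes through `Invalid`):

```python
# Standalone sketch of the exception pattern introduced in nova/exception.py.
def _(msg):
    # Stub for the gettext translation hook installed by the real package.
    return msg


class NovaException(Exception):
    """Base exception: subclasses define a printf-style 'message' template."""
    message = _("An unknown exception occurred.")

    def __init__(self, **kwargs):
        try:
            self._error_string = self.message % kwargs
        except Exception:
            # Fall back to the raw template if the kwargs do not match it.
            self._error_string = self.message

    def __str__(self):
        return self._error_string


class InstanceNotRunning(NovaException):
    message = _("Instance %(instance_id)s is not running.")


# Call sites raise the specific class with keyword arguments:
try:
    raise InstanceNotRunning(instance_id="i-0001")
except InstanceNotRunning as exc:
    print(exc)  # -> Instance i-0001 is not running.
```

Raising `InstanceNotRunning(instance_id="i-0001")` renders "Instance i-0001 is not running."; if the keyword arguments do not match the template, the constructor falls back to the raw template rather than raising a formatting error.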
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 415c0995f..88128fd1f 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -564,9 +564,8 @@ class Controller(common.OpenstackController):
         """
         image_id = image_meta['id']
         if image_meta['status'] != 'active':
-            raise exception.Invalid(
-                _("Cannot build from image %(image_id)s, status not active") %
-                locals())
+            raise exception.ImageUnacceptable(image_id=image_id,
+                                              reason=_("status is not active"))

         if image_meta.get('container_format') != 'ami':
             return None, None
diff --git a/nova/exception.py b/nova/exception.py
index 3123b2f1f..01a9d183a 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -83,10 +83,6 @@ class NotEmpty(Error):
     pass


-class Invalid(Error):
-    pass
-
-
 class InvalidInputException(Error):
     pass

@@ -130,3 +126,105 @@ def wrap_exception(f):
             raise
     _wrap.func_name = f.func_name
     return _wrap
+
+
+class NovaException(Exception):
+    """Base Nova Exception
+
+    To correctly use this class, inherit from it and define
+    a 'message' property. That message will get printf'd
+    with the keyword arguments provided to the constructor.
+
+    """
+    message = _("An unknown exception occurred.")
+
+    def __init__(self, **kwargs):
+        try:
+            self._error_string = self.message % kwargs
+
+        except Exception:
+            # at least get the core message out if something happened
+            self._error_string = self.message
+
+    def __str__(self):
+        return self._error_string
+
+
+#TODO(bcwaldon): EOL this exception!
+class Invalid(NovaException):
+    pass
+
+
+class InstanceNotRunning(Invalid):
+    message = _("Instance %(instance_id)s is not running.")
+
+
+class InstanceNotSuspended(Invalid):
+    message = _("Instance %(instance_id)s is not suspended.")
+
+
+class InstanceSuspendFailure(Invalid):
+    message = _("Failed to suspend instance") + ": %(reason)s"
+
+
+class InstanceResumeFailure(Invalid):
+    message = _("Failed to resume server") + ": %(reason)s."
+
+
+class InstanceRebootFailure(Invalid):
+    message = _("Failed to reboot instance") + ": %(reason)s"
+
+
+class ServiceUnavailable(Invalid):
+    message = _("Service is unavailable at this time.")
+
+
+class VolumeServiceUnavailable(ServiceUnavailable):
+    message = _("Volume service is unavailable at this time.")
+
+
+class ComputeServiceUnavailable(ServiceUnavailable):
+    message = _("Compute service is unavailable at this time.")
+
+
+class UnableToMigrateToSelf(Invalid):
+    message = _("Unable to migrate instance (%(instance_id)s) "
+                "to current host (%(host)s).")
+
+
+class SourceHostUnavailable(Invalid):
+    message = _("Original compute host is unavailable at this time.")
+
+
+class InvalidHypervisorType(Invalid):
+    message = _("The supplied hypervisor type of is invalid.")
+
+
+class DestinationHypervisorTooOld(Invalid):
+    message = _("The instance requires a newer hypervisor version than "
+                "has been provided.")
+
+
+class InvalidDevicePath(Invalid):
+    message = _("The supplied device path (%(path)s) is invalid.")
+
+
+class InvalidCPUInfo(Invalid):
+    message = _("Unacceptable CPU info") + ": %(reason)s"
+
+
+class InvalidVLANTag(Invalid):
+    message = _("VLAN tag is not appropriate for the port group "
+                "%(bridge)s. Expected VLAN tag is %(tag)s, "
+                "but the one associated with the port group is %(pgroup)s.")
+
+
+class InvalidVLANPortGroup(Invalid):
+    message = _("vSwitch which contains the port group %(bridge)s is "
+                "not associated with the desired physical adapter. "
+                "Expected vSwitch is %(expected)s, but the one associated "
+                "is %(actual)s.")
+
+
+class ImageUnacceptable(Invalid):
+    message = _("Image %(image_id)s is unacceptable") + ": %(reason)s"
diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py
index 93e6584f0..6e1ed480b 100644
--- a/nova/network/vmwareapi_net.py
+++ b/nova/network/vmwareapi_net.py
@@ -75,17 +75,13 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
     pg_vlanid, pg_vswitch = \
         network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge)

-    # Check if the vsiwtch associated is proper
+    # Check if the vswitch associated is proper
     if pg_vswitch != vswitch_associated:
-        raise exception.Invalid(_("vSwitch which contains the port group "
-                    "%(bridge)s is not associated with the desired "
-                    "physical adapter. Expected vSwitch is "
-                    "%(vswitch_associated)s, but the one associated"
-                    " is %(pg_vswitch)s") % locals())
+        raise exception.InvalidVLANPortGroup(bridge=bridge,
+                                             expected=vswitch_associated,
+                                             actual=pg_vswitch)

     # Check if the vlan id is proper for the port group
     if pg_vlanid != vlan_num:
-        raise exception.Invalid(_("VLAN tag is not appropriate for the "
-                    "port group %(bridge)s. Expected VLAN tag is "
-                    "%(vlan_num)s, but the one associated with the "
-                    "port group is %(pg_vlanid)s") % locals())
+        raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
+                                       pgroup=pg_vlanid)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index ce05d9f6a..87b10e940 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -129,15 +129,14 @@ class Scheduler(object):
         if (power_state.RUNNING != instance_ref['state'] or \
                 'running' != instance_ref['state_description']):
             ec2_id = instance_ref['hostname']
-            raise exception.Invalid(_('Instance(%s) is not running') % ec2_id)
+            raise exception.InstanceNotRunning(instance_id=ec2_id)

         # Checing volume node is running when any volumes are mounted
         # to the instance.
         if len(instance_ref['volumes']) != 0:
             services = db.service_get_all_by_topic(context, 'volume')
             if len(services) < 1 or not self.service_is_up(services[0]):
-                raise exception.Invalid(_("volume node is not alive"
-                                          "(time synchronize problem?)"))
+                raise exception.VolumeServiceUnavailable()

         # Checking src host exists and compute node
         src = instance_ref['host']
@@ -145,8 +144,7 @@

         # Checking src host is alive.
         if not self.service_is_up(services[0]):
-            raise exception.Invalid(_("%s is not alive(time "
-                                      "synchronize problem?)") % src)
+            raise exception.ComputeServiceUnavailable(host=src)

     def _live_migration_dest_check(self, context, instance_ref, dest):
         """Live migration check routine (for destination host).
@@ -163,17 +161,15 @@

         # Checking dest host is alive.
         if not self.service_is_up(dservice_ref):
-            raise exception.Invalid(_("%s is not alive(time "
-                                      "synchronize problem?)") % dest)
+            raise exception.ComputeServiceUnavailable(host=dest)

         # Checking whether The host where instance is running
         # and dest is not same.
         src = instance_ref['host']
         if dest == src:
             ec2_id = instance_ref['hostname']
-            raise exception.Invalid(_("%(dest)s is where %(ec2_id)s is "
-                                      "running now. choose other host.")
-                                      % locals())
+            raise exception.UnableToMigrateToSelf(instance_id=ec2_id,
+                                                  host=dest)

         # Checking dst host still has enough capacities.
         self.assert_compute_node_has_enough_resources(context,
@@ -204,26 +200,20 @@
             oservice_refs = db.service_get_all_compute_by_host(context,
                                            instance_ref['launched_on'])
         except exception.NotFound:
-            raise exception.Invalid(_("host %s where instance was launched "
-                                      "does not exist.")
-                                      % instance_ref['launched_on'])
+            raise exception.SourceHostUnavailable()
         oservice_ref = oservice_refs[0]['compute_node'][0]

         # Checking hypervisor is same.
         orig_hypervisor = oservice_ref['hypervisor_type']
         dest_hypervisor = dservice_ref['hypervisor_type']
         if orig_hypervisor != dest_hypervisor:
-            raise exception.Invalid(_("Different hypervisor type"
-                                      "(%(orig_hypervisor)s->"
-                                      "%(dest_hypervisor)s)')" % locals()))
+            raise exception.InvalidHypervisorType()

         # Checkng hypervisor version.
         orig_hypervisor = oservice_ref['hypervisor_version']
         dest_hypervisor = dservice_ref['hypervisor_version']
         if orig_hypervisor > dest_hypervisor:
-            raise exception.Invalid(_("Older hypervisor version"
-                                      "(%(orig_hypervisor)s->"
-                                      "%(dest_hypervisor)s)") % locals())
+            raise exception.DestinationHypervisorTooOld()

         # Checking cpuinfo.
         try:
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 51d987288..42ea19d6e 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -698,14 +698,10 @@ class SimpleDriverTestCase(test.TestCase):
                'topic': 'volume', 'report_count': 0}
         s_ref = db.service_create(self.context, dic)

-        try:
-            self.scheduler.driver.schedule_live_migration(self.context,
-                                                          instance_id,
-                                                          i_ref['host'])
-        except exception.Invalid, e:
-            c = (e.message.find('volume node is not alive') >= 0)
+        self.assertRaises(exception.VolumeServiceUnavailable,
+                          self.scheduler.driver.schedule_live_migration,
+                          self.context, instance_id, i_ref['host'])

-        self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])
         db.volume_destroy(self.context, v_ref['id'])
@@ -718,13 +714,10 @@ class SimpleDriverTestCase(test.TestCase):
         s_ref = self._create_compute_service(created_at=t, updated_at=t,
                                              host=i_ref['host'])

-        try:
-            self.scheduler.driver._live_migration_src_check(self.context,
-                                                            i_ref)
-        except exception.Invalid, e:
-            c = (e.message.find('is not alive') >= 0)
+        self.assertRaises(exception.ComputeServiceUnavailable,
+                          self.scheduler.driver._live_migration_src_check,
+                          self.context, i_ref)

-        self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])

@@ -749,14 +742,10 @@ class SimpleDriverTestCase(test.TestCase):
         s_ref = self._create_compute_service(created_at=t, updated_at=t,
                                              host=i_ref['host'])

-        try:
-            self.scheduler.driver._live_migration_dest_check(self.context,
-                                                             i_ref,
-                                                             i_ref['host'])
-        except exception.Invalid, e:
-            c = (e.message.find('is not alive') >= 0)
+        self.assertRaises(exception.ComputeServiceUnavailable,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, i_ref['host'])

-        self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])

@@ -766,14 +755,10 @@ class SimpleDriverTestCase(test.TestCase):
         i_ref = db.instance_get(self.context, instance_id)
         s_ref = self._create_compute_service(host=i_ref['host'])

-        try:
-            self.scheduler.driver._live_migration_dest_check(self.context,
-                                                             i_ref,
-                                                             i_ref['host'])
-        except exception.Invalid, e:
-            c = (e.message.find('choose other host') >= 0)
+        self.assertRaises(exception.UnableToMigrateToSelf,
+                          self.scheduler.driver._live_migration_dest_check,
+                          self.context, i_ref, i_ref['host'])

-        self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])

@@ -837,14 +822,10 @@ class SimpleDriverTestCase(test.TestCase):
                                          "args": {'filename': fpath}})
         self.mox.ReplayAll()

-        try:
-            self.scheduler.driver._live_migration_common_check(self.context,
-                                                               i_ref,
-                                                               dest)
-        except exception.Invalid, e:
-            c = (e.message.find('does not exist') >= 0)
+        self.assertRaises(exception.SourceHostUnavailable,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)

-        self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])

@@ -865,14 +846,10 @@ class SimpleDriverTestCase(test.TestCase):
         driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
         self.mox.ReplayAll()

-        try:
-            self.scheduler.driver._live_migration_common_check(self.context,
-                                                               i_ref,
-                                                               dest)
-        except exception.Invalid, e:
-            c = (e.message.find(_('Different hypervisor type')) >= 0)
+        self.assertRaises(exception.InvalidHypervisorType,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)

-        self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])
         db.service_destroy(self.context, s_ref2['id'])
@@ -895,14 +872,10 @@ class SimpleDriverTestCase(test.TestCase):
         driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
         self.mox.ReplayAll()

-        try:
-            self.scheduler.driver._live_migration_common_check(self.context,
-                                                               i_ref,
-                                                               dest)
-        except exception.Invalid, e:
-            c = (e.message.find(_('Older hypervisor version')) >= 0)
+        self.assertRaises(exception.DestinationHypervisorTooOld,
+                          self.scheduler.driver._live_migration_common_check,
+                          self.context, i_ref, dest)

-        self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])
         db.service_destroy(self.context, s_ref2['id'])
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 0a0c7a958..d99ffce95 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -451,7 +451,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.mox.ReplayAll()

         conn = libvirt_conn.LibvirtConnection(False)
-        self.assertRaises(exception.Invalid,
+        self.assertRaises(exception.ComputeServiceUnavailable,
                           conn.update_available_resource,
                           self.context, 'dummy')
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index e6671878d..7a78ce9e2 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -404,7 +404,7 @@ class LibvirtConnection(driver.ComputeDriver):
                                     name,
                                     mount_device)
         else:
-            raise exception.Invalid(_("Invalid device path %s") % device_path)
+            raise exception.InvalidDevicePath(path=device_path)

         virt_dom.attachDevice(xml)

@@ -1319,9 +1319,9 @@ class LibvirtConnection(driver.ComputeDriver):
         xml = libxml2.parseDoc(xml)
         nodes = xml.xpathEval('//host/cpu')
         if len(nodes) != 1:
-            raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
-                                      "but %d\n") % len(nodes)
-                                      + xml.serialize())
+            reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
+            reason += xml.serialize()
+            raise exception.InvalidCPUInfo(reason=reason)

         cpu_info = dict()

@@ -1350,9 +1350,8 @@ class LibvirtConnection(driver.ComputeDriver):
         tkeys = topology.keys()
         if set(tkeys) != set(keys):
             ks = ', '.join(keys)
-            raise exception.Invalid(_("Invalid xml: topology"
-                                      "(%(topology)s) must have "
-                                      "%(ks)s") % locals())
+            reason = _("topology (%(topology)s) must have %(ks)s")
+            raise exception.InvalidCPUInfo(reason=reason % locals())

         feature_nodes = xml.xpathEval('//host/cpu/feature')
         features = list()
@@ -1407,9 +1406,7 @@ class LibvirtConnection(driver.ComputeDriver):
         try:
             service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
         except exception.NotFound:
-            raise exception.Invalid(_("Cannot update compute manager "
-                                      "specific info, because no service "
-                                      "record was found."))
+            raise exception.ComputeServiceUnavailable(host=host)

         # Updating host information
         dic = {'vcpus': self.get_vcpu_total(),
@@ -1462,7 +1459,7 @@ class LibvirtConnection(driver.ComputeDriver):
                 raise

         if ret <= 0:
-            raise exception.Invalid(m % locals())
+            raise exception.InvalidCPUInfo(reason=m % locals())

         return
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index cf6c88bbd..b700c438f 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -501,8 +501,8 @@ class VMWareVMOps(object):
         # Raise an exception if the VM is not powered On.
         if pwr_state not in ["poweredOn"]:
-            raise exception.Invalid(_("instance - %s not poweredOn. So can't "
-                                      "be rebooted.") % instance.name)
+            reason = _("instance is not powered on")
+            raise exception.InstanceRebootFailure(reason=reason)

         # If latest vmware tools are installed in the VM, and that the tools
         # are running, then only do a guest reboot. Otherwise do a hard reset.
@@ -620,8 +620,9 @@ class VMWareVMOps(object):
             LOG.debug(_("Suspended the VM %s ") % instance.name)
         # Raise Exception if VM is poweredOff
         elif pwr_state == "poweredOff":
-            raise exception.Invalid(_("instance - %s is poweredOff and hence "
-                                      " can't be suspended.") % instance.name)
+            reason = _("instance is powered off and can not be suspended.")
+            raise exception.InstanceSuspendFailure(reason=reason)
+
         LOG.debug(_("VM %s was already in suspended state. So returning "
                     "without doing anything") % instance.name)
@@ -643,8 +644,8 @@ class VMWareVMOps(object):
             self._wait_with_callback(instance.id, suspend_task, callback)
             LOG.debug(_("Resumed the VM %s ") % instance.name)
         else:
-            raise exception.Invalid(_("instance - %s not in Suspended state "
-                                      "and hence can't be Resumed.") % instance.name)
+            reason = _("instance is not in a suspended state")
+            raise exception.InstanceResumeFailure(reason=reason)

     def get_info(self, instance_name):
         """Return data about the VM instance."""
diff --git a/setup.py b/setup.py
@@ -16,6 +16,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import gettext
 import glob
 import os
 import subprocess
@@ -33,6 +34,7 @@
 except ImportError:
     assert DistUtilsExtra.auto.__version__ >= '2.18',\
         'needs DistUtilsExtra.auto >= 2.18'

+gettext.install('nova', unicode=1)
 from nova.utils import parse_mailmap, str_dict_replace
 from nova import version
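
The test changes above follow the same refactor: instead of catching the catch-all `exception.Invalid` and substring-matching `e.message`, the scheduler tests now call `assertRaises` with the specific exception class. Below is a self-contained sketch of that testing pattern; `FakeScheduler` and the local `VolumeServiceUnavailable` class are illustrative stand-ins, not Nova code:

```python
# Sketch of the test-side change: assert on the exception class directly
# rather than searching the exception message for a substring.
import unittest


class VolumeServiceUnavailable(Exception):
    """Stand-in for nova.exception.VolumeServiceUnavailable."""
    pass


class FakeScheduler(object):
    def _live_migration_src_check(self, context, instance_ref):
        # Simulate the failure path exercised by the real test.
        raise VolumeServiceUnavailable()


class LiveMigrationCheckTest(unittest.TestCase):
    def test_src_check_raises_specific_exception(self):
        scheduler = FakeScheduler()
        self.assertRaises(VolumeServiceUnavailable,
                          scheduler._live_migration_src_check,
                          None, {})


if __name__ == '__main__':
    unittest.main()
```

Asserting on the class rather than the message text keeps the tests independent of message wording and of gettext translation.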