author    Lvov Maxim <usrleon@gmail.com>  2011-07-15 11:46:33 +0400
committer Lvov Maxim <usrleon@gmail.com>  2011-07-15 11:46:33 +0400
commit    cb172c1f085e7c22add4e0111bfc1fc199ace0bd (patch)
tree      cf2afe41478d34fab2134df70f9d605139c0ad4c
parent    bfb4a870e44a90c004cd7d568eb35a50221c7bd5 (diff)
parent    ad700b0ecec0ffd8ed9c08caeb8f1f75fc4b482f (diff)
merge with trunk, resolve conflicts
-rw-r--r--   Authors                                    3
-rwxr-xr-x   bin/nova-manage                           59
-rw-r--r--   nova/api/openstack/common.py              26
-rw-r--r--   nova/api/openstack/images.py             111
-rw-r--r--   nova/api/openstack/views/images.py        28
-rw-r--r--   nova/api/openstack/wsgi.py                23
-rw-r--r--   nova/compute/manager.py                  118
-rw-r--r--   nova/console/manager.py                    4
-rw-r--r--   nova/console/vmrc_manager.py               4
-rw-r--r--   nova/exception.py                         59
-rw-r--r--   nova/network/manager.py                   14
-rw-r--r--   nova/notifier/api.py                      14
-rw-r--r--   nova/rpc.py                                2
-rw-r--r--   nova/scheduler/driver.py                   1
-rw-r--r--   nova/tests/api/openstack/test_common.py   57
-rw-r--r--   nova/tests/api/openstack/test_images.py  532
-rw-r--r--   nova/tests/test_exception.py              63
-rw-r--r--   nova/virt/libvirt/connection.py           30
-rw-r--r--   nova/virt/xenapi/vmops.py                 12
19 files changed, 842 insertions, 318 deletions
diff --git a/Authors b/Authors
index 4aa65eea2..8ffb7d8d4 100644
--- a/Authors
+++ b/Authors
@@ -65,6 +65,7 @@ Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Mike Scherbakov <mihgen@gmail.com>
+Mohammed Naser <mnaser@vexxhost.com>
Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
@@ -84,7 +85,9 @@ Ryan Lucio <rlucio@internap.com>
Salvatore Orlando <salvatore.orlando@eu.citrix.com>
Sandy Walsh <sandy.walsh@rackspace.com>
Sateesh Chodapuneedi <sateesh.chodapuneedi@citrix.com>
+Scott Moser <smoser@ubuntu.com>
Soren Hansen <soren.hansen@rackspace.com>
+Stephanie Reese <reese.sm@gmail.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
diff --git a/bin/nova-manage b/bin/nova-manage
index f5c140fa9..2e9b52d74 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -449,8 +449,11 @@ class ProjectCommands(object):
except (exception.UserNotFound, exception.ProjectNotFound) as ex:
print ex
raise
- with open(filename, 'w') as f:
- f.write(rc)
+ if filename == "-":
+ sys.stdout.write(rc)
+ else:
+ with open(filename, 'w') as f:
+ f.write(rc)
@args('--user', dest="username", metavar='<username>', help='User name')
def list(self, username=None):
@@ -505,8 +508,11 @@ class ProjectCommands(object):
"""Exports credentials for project to a zip file"""
try:
zip_file = self.manager.get_credentials(user_id, project_id)
- with open(filename, 'w') as f:
- f.write(zip_file)
+ if filename == "-":
+ sys.stdout.write(zip_file)
+ else:
+ with open(filename, 'w') as f:
+ f.write(zip_file)
except (exception.UserNotFound, exception.ProjectNotFound) as ex:
print ex
raise
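
Both hunks above introduce the same convention: a filename of "-" means write to stdout instead of a file. A minimal standalone sketch of the pattern (write_output is an illustrative name, not part of the patch):

    import sys

    def write_output(filename, data):
        # "-" is the usual CLI spelling for "write to stdout"
        if filename == "-":
            sys.stdout.write(data)
        else:
            with open(filename, 'w') as f:
                f.write(data)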
@@ -662,17 +668,19 @@ class NetworkCommands(object):
def list(self):
"""List all created networks"""
- print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s" % (_('network'),
- _('netmask'),
- _('start address'),
- 'DNS',
- 'project')
+ print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % (_('network'),
+ _('netmask'),
+ _('start address'),
+ _('DNS'),
+ _('VlanID'),
+ 'project')
for network in db.network_get_all(context.get_admin_context()):
- print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s" % (network.cidr,
- network.netmask,
- network.dhcp_start,
- network.dns,
- network.project_id)
+ print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % (network.cidr,
+ network.netmask,
+ network.dhcp_start,
+ network.dns,
+ network.vlan,
+ network.project_id)
@args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>', help='Network to delete')
def delete(self, fixed_range):
@@ -854,6 +862,28 @@ class ServiceCommands(object):
{"method": "update_available_resource"})
+class HostCommands(object):
+ """List hosts"""
+
+ def list(self, zone=None):
+ """Show a list of all physical hosts. Filter by zone.
+ args: [zone]"""
+ print "%-25s\t%-15s" % (_('host'),
+ _('zone'))
+ ctxt = context.get_admin_context()
+ now = utils.utcnow()
+ services = db.service_get_all(ctxt)
+ if zone:
+ services = [s for s in services if s['availability_zone'] == zone]
+ hosts = []
+ for srv in services:
+ if not [h for h in hosts if h['host'] == srv['host']]:
+ hosts.append(srv)
+
+ for h in hosts:
+ print "%-25s\t%-15s" % (h['host'], h['availability_zone'])
+
+
class DbCommands(object):
"""Class for managing the database."""
@@ -1248,6 +1278,7 @@ CATEGORIES = [
('fixed', FixedIpCommands),
('flavor', InstanceTypeCommands),
('floating', FloatingIpCommands),
+ ('host', HostCommands),
('instance_type', InstanceTypeCommands),
('image', ImageCommands),
('network', NetworkCommands),
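
The new HostCommands.list collapses the service table to one row per host before printing. A standalone sketch of that dedup loop with inline data (the dicts below are illustrative; the real command fetches rows via db.service_get_all):

    services = [
        {'host': 'compute1', 'availability_zone': 'nova'},
        {'host': 'compute1', 'availability_zone': 'nova'},
        {'host': 'compute2', 'availability_zone': 'nova'},
    ]
    hosts = []
    for srv in services:
        # keep only the first service row seen for each host
        if not [h for h in hosts if h['host'] == srv['host']]:
            hosts.append(srv)
    for h in hosts:
        print "%-25s\t%-15s" % (h['host'], h['availability_zone'])
    # compute1    nova
    # compute2    nova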
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 9aa384f33..8e12ce0c0 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -133,14 +133,32 @@ def get_id_from_href(href):
return int(urlparse(href).path.split('/')[-1])
except:
LOG.debug(_("Error extracting id from href: %s") % href)
- raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
+ raise ValueError(_('could not parse id from href'))
-def remove_version_from_href(base_url):
- """Removes the api version from the href.
+def remove_version_from_href(href):
+ """Removes the first api version from the href.
Given: 'http://www.nova.com/v1.1/123'
Returns: 'http://www.nova.com/123'
+ Given: 'http://www.nova.com/v1.1'
+ Returns: 'http://www.nova.com'
+
"""
- return base_url.rsplit('/', 1).pop(0)
+ try:
+ #removes the first instance that matches /v#.#/
+ new_href = re.sub(r'[/][v][0-9]+\.[0-9]+[/]', '/', href, count=1)
+
+ #if no version was found, try finding /v#.# at the end of the string
+ if new_href == href:
+ new_href = re.sub(r'[/][v][0-9]+\.[0-9]+$', '', href, count=1)
+ except:
+ LOG.debug(_("Error removing version from href: %s") % href)
+ msg = _('could not parse version from href')
+ raise ValueError(msg)
+
+ if new_href == href:
+ msg = _('href does not contain version')
+ raise ValueError(msg)
+ return new_href
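
For reference, the behavior of the rewritten helpers, taken from the docstring above and the tests added below. Both now raise ValueError rather than webob's HTTPBadRequest, leaving HTTP status codes to the caller:

    from nova.api.openstack import common

    common.remove_version_from_href('http://www.nova.com/v1.1/123')
    # -> 'http://www.nova.com/123'
    common.remove_version_from_href('http://www.nova.com/v1.1')
    # -> 'http://www.nova.com'
    common.remove_version_from_href('http://www.nova.com/1.1/images')
    # -> raises ValueError ('href does not contain version')
    common.get_id_from_href('http://www.nova.com/dir/45')
    # -> 45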
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 8ff92b8fe..d0317583e 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -27,6 +27,7 @@ from nova import log
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.api.openstack import image_metadata
+from nova.api.openstack import servers
from nova.api.openstack.views import images as images_view
from nova.api.openstack import wsgi
@@ -274,59 +275,99 @@ class ControllerV11(Controller):
class ImageXMLSerializer(wsgi.XMLDictSerializer):
- metadata = {
- "attributes": {
- "image": ["id", "name", "updated", "created", "status",
- "serverId", "progress", "serverRef"],
- "link": ["rel", "type", "href"],
- },
- }
-
xmlns = wsgi.XMLNS_V11
def __init__(self):
self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer()
def _image_to_xml(self, xml_doc, image):
- try:
- metadata = image.pop('metadata').items()
- except Exception:
- LOG.debug(_("Image object missing metadata attribute"))
- metadata = {}
-
- node = self._to_xml_node(xml_doc, self.metadata, 'image', image)
- metadata_node = self.metadata_serializer.meta_list_to_xml(xml_doc,
- metadata)
- node.appendChild(metadata_node)
- return node
-
- def _image_list_to_xml(self, xml_doc, images):
+ image_node = xml_doc.createElement('image')
+ image_node.setAttribute('id', str(image['id']))
+ image_node.setAttribute('name', image['name'])
+ link_nodes = self._create_link_nodes(xml_doc,
+ image['links'])
+ for link_node in link_nodes:
+ image_node.appendChild(link_node)
+ return image_node
+
+ def _image_to_xml_detailed(self, xml_doc, image):
+ image_node = xml_doc.createElement('image')
+ self._add_image_attributes(image_node, image)
+
+ if 'server' in image:
+ server_node = self._create_server_node(xml_doc, image['server'])
+ image_node.appendChild(server_node)
+
+ metadata = image.get('metadata', {}).items()
+ if len(metadata) > 0:
+ metadata_node = self._create_metadata_node(xml_doc, metadata)
+ image_node.appendChild(metadata_node)
+
+ link_nodes = self._create_link_nodes(xml_doc,
+ image['links'])
+ for link_node in link_nodes:
+ image_node.appendChild(link_node)
+
+ return image_node
+
+ def _add_image_attributes(self, node, image):
+ node.setAttribute('id', str(image['id']))
+ node.setAttribute('name', image['name'])
+ node.setAttribute('created', image['created'])
+ node.setAttribute('updated', image['updated'])
+ node.setAttribute('status', image['status'])
+ if 'progress' in image:
+ node.setAttribute('progress', str(image['progress']))
+
+ def _create_metadata_node(self, xml_doc, metadata):
+ return self.metadata_serializer.meta_list_to_xml(xml_doc, metadata)
+
+ def _create_server_node(self, xml_doc, server):
+ server_node = xml_doc.createElement('server')
+ server_node.setAttribute('id', str(server['id']))
+ link_nodes = self._create_link_nodes(xml_doc,
+ server['links'])
+ for link_node in link_nodes:
+ server_node.appendChild(link_node)
+ return server_node
+
+ def _image_list_to_xml(self, xml_doc, images, detailed):
container_node = xml_doc.createElement('images')
+ if detailed:
+ image_to_xml = self._image_to_xml_detailed
+ else:
+ image_to_xml = self._image_to_xml
+
for image in images:
- item_node = self._image_to_xml(xml_doc, image)
+ item_node = image_to_xml(xml_doc, image)
container_node.appendChild(item_node)
return container_node
- def _image_to_xml_string(self, image):
- xml_doc = minidom.Document()
- item_node = self._image_to_xml(xml_doc, image)
- self._add_xmlns(item_node)
- return item_node.toprettyxml(indent=' ')
-
- def _image_list_to_xml_string(self, images):
+ def index(self, images_dict):
xml_doc = minidom.Document()
- container_node = self._image_list_to_xml(xml_doc, images)
- self._add_xmlns(container_node)
- return container_node.toprettyxml(indent=' ')
+ node = self._image_list_to_xml(xml_doc,
+ images_dict['images'],
+ detailed=False)
+ return self.to_xml_string(node, True)
def detail(self, images_dict):
- return self._image_list_to_xml_string(images_dict['images'])
+ xml_doc = minidom.Document()
+ node = self._image_list_to_xml(xml_doc,
+ images_dict['images'],
+ detailed=True)
+ return self.to_xml_string(node, True)
def show(self, image_dict):
- return self._image_to_xml_string(image_dict['image'])
+ xml_doc = minidom.Document()
+ node = self._image_to_xml_detailed(xml_doc,
+ image_dict['image'])
+ return self.to_xml_string(node, True)
def create(self, image_dict):
- return self._image_to_xml_string(image_dict['image'])
+ xml_doc = minidom.Document()
+ node = self._image_to_xml_detailed(xml_doc,
+ image_dict['image'])
+ return self.to_xml_string(node, True)
def create_resource(version='1.0'):
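
The serializer now renders a minimal representation for index and a full one for detail/show/create. A sketch of the call surface (an empty fixture keeps it runnable; full fixtures appear in the tests below):

    from nova.api.openstack import images

    serializer = images.ImageXMLSerializer()
    print serializer.index({'images': []})   # id, name and atom:link only
    print serializer.detail({'images': []})  # adds created/updated/status,
                                             # server, metadata, bookmark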
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index 005341c62..5c0510377 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -98,7 +98,20 @@ class ViewBuilderV11(ViewBuilder):
def _build_server(self, image, image_obj):
try:
- image['serverRef'] = image_obj['properties']['instance_ref']
+ serverRef = image_obj['properties']['instance_ref']
+ image['server'] = {
+ "id": common.get_id_from_href(serverRef),
+ "links": [
+ {
+ "rel": "self",
+ "href": serverRef,
+ },
+ {
+ "rel": "bookmark",
+ "href": common.remove_version_from_href(serverRef),
+ },
+ ]
+ }
except KeyError:
return
@@ -108,18 +121,17 @@ class ViewBuilderV11(ViewBuilder):
href = self.generate_href(image_obj["id"])
bookmark = self.generate_bookmark(image_obj["id"])
- if detail:
- image["metadata"] = image_obj.get("properties", {})
-
image["links"] = [{
"rel": "self",
"href": href,
- },
- {
- "rel": "bookmark",
- "href": bookmark,
}]
+ if detail:
+ image["metadata"] = image_obj.get("properties", {})
+ image["links"].append({"rel": "bookmark",
+ "href": bookmark,
+ })
+
return image
def generate_bookmark(self, image_id):
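
The resulting image view, with the shape taken from the updated tests below: serverRef is replaced by a nested server entity carrying its own atom links, and the bookmark link is only appended on detail views:

    image = {
        'id': 124,
        'name': 'queued snapshot',
        'server': {
            'id': 42,
            'links': [
                {'rel': 'self', 'href': 'http://localhost/v1.1/servers/42'},
                {'rel': 'bookmark', 'href': 'http://localhost/servers/42'},
            ],
        },
        'links': [
            {'rel': 'self', 'href': 'http://localhost/v1.1/images/124'},
            # detail views only:
            {'rel': 'bookmark', 'href': 'http://localhost/images/124'},
        ],
    }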
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 8eff9e441..c3f841aa5 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -270,13 +270,21 @@ class XMLDictSerializer(DictSerializer):
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
- self._add_xmlns(node)
+ return self.to_xml_string(node)
- return node.toprettyxml(indent=' ', encoding='utf-8')
+ def to_xml_string(self, node, has_atom=False):
+ self._add_xmlns(node, has_atom)
+ return node.toprettyxml(indent=' ', encoding='UTF-8')
- def _add_xmlns(self, node):
+ #NOTE (ameade): has_atom should be removed once all of the
+ # xml serializers and view builders have been updated to the current
+ # spec, which requires all responses to include xmlns:atom; the
+ # has_atom flag is here to keep current tests from breaking
+ def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
+ if has_atom:
+ node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
@@ -332,6 +340,15 @@ class XMLDictSerializer(DictSerializer):
result.appendChild(node)
return result
+ def _create_link_nodes(self, xml_doc, links):
+ link_nodes = []
+ for link in links:
+ link_node = xml_doc.createElement('atom:link')
+ link_node.setAttribute('rel', link['rel'])
+ link_node.setAttribute('href', link['href'])
+ link_nodes.append(link_node)
+ return link_nodes
+
class ResponseHeadersSerializer(ActionDispatcher):
"""Default response headers serialization"""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index c627d2985..960dfea54 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -54,7 +54,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
-from nova.notifier import api as notifier_api
+from nova.notifier import api as notifier
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -85,6 +85,10 @@ flags.DEFINE_integer('host_state_interval', 120,
LOG = logging.getLogger('nova.compute.manager')
+def publisher_id(host=None):
+ return notifier.publisher_id("compute", host)
+
+
def checks_instance_lock(function):
"""Decorator to prevent action against locked instances for non-admins."""
@functools.wraps(function)
@@ -183,7 +187,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_rules(self, context, security_group_id,
**kwargs):
"""Tell the virtualization driver to refresh security group rules.
@@ -193,7 +197,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_rules(security_group_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_security_group_members(self, context,
security_group_id, **kwargs):
"""Tell the virtualization driver to refresh security group members.
@@ -203,7 +207,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def refresh_provider_fw_rules(self, context, **_kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
@@ -318,10 +322,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance)
- notifier_api.notify('compute.%s' % self.host,
- 'compute.instance.create',
- notifier_api.INFO,
- usage_info)
+ notifier.notify('compute.%s' % self.host,
+ 'compute.instance.create',
+ notifier.INFO, usage_info)
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -329,11 +332,11 @@ class ComputeManager(manager.SchedulerDependentManager):
# be fixed once we have no-db-messaging
pass
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def run_instance(self, context, instance_id, **kwargs):
self._run_instance(context, instance_id, **kwargs)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def start_instance(self, context, instance_id):
"""Starting an instance on this host."""
@@ -366,7 +369,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
@@ -376,19 +379,18 @@ class ComputeManager(manager.SchedulerDependentManager):
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
usage_info = utils.usage_from_instance(instance)
- notifier_api.notify('compute.%s' % self.host,
- 'compute.instance.delete',
- notifier_api.INFO,
- usage_info)
+ notifier.notify('compute.%s' % self.host,
+ 'compute.instance.delete',
+ notifier.INFO, usage_info)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
# instance state will be updated to stopped by _poll_instance_states()
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
@@ -418,12 +420,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id)
usage_info = utils.usage_from_instance(instance_ref,
image_ref=image_ref)
- notifier_api.notify('compute.%s' % self.host,
+ notifier.notify('compute.%s' % self.host,
'compute.instance.rebuild',
- notifier_api.INFO,
+ notifier.INFO,
usage_info)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
@@ -448,7 +450,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.reboot(instance_ref)
self._update_state(context, instance_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def snapshot_instance(self, context, instance_id, image_id,
image_type='snapshot', backup_type=None,
rotation=None):
@@ -540,7 +542,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_("Deleting image %d" % image_id))
image_service.delete(context, image_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this host.
@@ -588,7 +590,7 @@ class ComputeManager(manager.SchedulerDependentManager):
time.sleep(1)
continue
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def inject_file(self, context, instance_id, path, file_contents):
"""Write a file to the specified path in an instance on this host."""
@@ -606,7 +608,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(msg)
self.driver.inject_file(instance_ref, path, file_contents)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def agent_update(self, context, instance_id, url, md5hash):
"""Update agent running on an instance on this host."""
@@ -624,7 +626,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.audit(msg)
self.driver.agent_update(instance_ref, url, md5hash)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
@@ -641,7 +643,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.rescue(instance_ref, _update_state)
self._update_state(context, instance_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unrescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
@@ -662,7 +664,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Update instance state when async task completes."""
self._update_state(context, instance_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def confirm_resize(self, context, instance_id, migration_id):
"""Destroys the source instance."""
@@ -670,12 +672,12 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
self.driver.destroy(instance_ref)
usage_info = utils.usage_from_instance(instance_ref)
- notifier_api.notify('compute.%s' % self.host,
+ notifier.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
- notifier_api.INFO,
+ notifier.INFO,
usage_info)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def revert_resize(self, context, instance_id, migration_id):
"""Destroys the new instance on the destination machine.
@@ -697,7 +699,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'instance_id': instance_id, },
})
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def finish_revert_resize(self, context, instance_id, migration_id):
"""Finishes the second half of reverting a resize.
@@ -722,12 +724,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
usage_info = utils.usage_from_instance(instance_ref)
- notifier_api.notify('compute.%s' % self.host,
+ notifier.notify('compute.%s' % self.host,
'compute.instance.resize.revert',
- notifier_api.INFO,
+ notifier.INFO,
usage_info)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def prep_resize(self, context, instance_id, flavor_id):
"""Initiates the process of moving a running instance to another host.
@@ -765,12 +767,12 @@ class ComputeManager(manager.SchedulerDependentManager):
usage_info = utils.usage_from_instance(instance_ref,
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
- notifier_api.notify('compute.%s' % self.host,
+ notifier.notify('compute.%s' % self.host,
'compute.instance.resize.prep',
- notifier_api.INFO,
+ notifier.INFO,
usage_info)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def resize_instance(self, context, instance_id, migration_id):
"""Starts the migration of a running instance to another host."""
@@ -796,7 +798,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'instance_id': instance_id,
'disk_info': disk_info}})
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def finish_resize(self, context, instance_id, migration_id, disk_info):
"""Completes the migration process.
@@ -828,7 +830,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.migration_update(context, migration_id,
{'status': 'finished', })
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def add_fixed_ip_to_instance(self, context, instance_id, network_id):
"""Calls network_api to add new fixed_ip to instance
@@ -840,7 +842,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.inject_network_info(context, instance_id)
self.reset_network(context, instance_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def remove_fixed_ip_from_instance(self, context, instance_id, address):
"""Calls network_api to remove existing fixed_ip from instance
@@ -852,7 +854,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.inject_network_info(context, instance_id)
self.reset_network(context, instance_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
@@ -869,7 +871,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def unpause_instance(self, context, instance_id):
"""Unpause a paused instance on this host."""
@@ -886,13 +888,13 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def set_host_enabled(self, context, instance_id=None, host=None,
enabled=None):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(host, enabled)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
@@ -901,7 +903,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context=context)
return self.driver.get_diagnostics(instance_ref)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def suspend_instance(self, context, instance_id):
"""Suspend the given instance."""
@@ -917,7 +919,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def resume_instance(self, context, instance_id):
"""Resume the given suspended instance."""
@@ -933,7 +935,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
result))
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def lock_instance(self, context, instance_id):
"""Lock the given instance."""
context = context.elevated()
@@ -941,7 +943,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_('instance %s: locking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': True})
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def unlock_instance(self, context, instance_id):
"""Unlock the given instance."""
context = context.elevated()
@@ -949,7 +951,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': False})
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_lock(self, context, instance_id):
"""Return the boolean state of the given instance's lock."""
context = context.elevated()
@@ -978,7 +980,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.inject_network_info(instance, network_info)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_console_output(self, context, instance_id):
"""Send the console output for the given instance."""
context = context.elevated()
@@ -988,7 +990,7 @@ class ComputeManager(manager.SchedulerDependentManager):
output = self.driver.get_console_output(instance_ref)
return output.decode('utf-8', 'replace').encode('ascii', 'replace')
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_ajax_console(self, context, instance_id):
"""Return connection information for an ajax console."""
context = context.elevated()
@@ -996,7 +998,7 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_ajax_console(instance_ref)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def get_vnc_console(self, context, instance_id):
"""Return connection information for a vnc console."""
context = context.elevated()
@@ -1059,7 +1061,7 @@ class ComputeManager(manager.SchedulerDependentManager):
return True
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
def _detach_volume(self, context, instance_id, volume_id, destroy_bdm):
"""Detach a volume from an instance."""
@@ -1094,7 +1096,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
self.volume_manager.remove_compute_volume(context, volume_id)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def compare_cpu(self, context, cpu_info):
"""Checks that the host cpu is compatible with a cpu given by xml.
@@ -1105,7 +1107,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.compare_cpu(cpu_info)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def create_shared_storage_test_file(self, context):
"""Makes tmpfile under FLAGS.instance_path.
@@ -1125,7 +1127,7 @@ class ComputeManager(manager.SchedulerDependentManager):
os.close(fd)
return os.path.basename(tmp_file)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def check_shared_storage_test_file(self, context, filename):
"""Confirms existence of the tmpfile under FLAGS.instances_path.
@@ -1137,7 +1139,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if not os.path.exists(tmp_file):
raise exception.FileNotFound(file_path=tmp_file)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def cleanup_shared_storage_test_file(self, context, filename):
"""Removes existence of the tmpfile under FLAGS.instances_path.
@@ -1148,7 +1150,7 @@ class ComputeManager(manager.SchedulerDependentManager):
tmp_file = os.path.join(FLAGS.instances_path, filename)
os.remove(tmp_file)
- @exception.wrap_exception
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def update_available_resource(self, context):
"""See comments update_resource_info.
diff --git a/nova/console/manager.py b/nova/console/manager.py
index e0db21666..2c823b763 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -56,7 +56,7 @@ class ConsoleProxyManager(manager.Manager):
def init_host(self):
self.driver.init_host()
- @exception.wrap_exception
+ @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
instance = self.db.instance_get(context, instance_id)
@@ -83,7 +83,7 @@ class ConsoleProxyManager(manager.Manager):
self.driver.setup_console(context, console)
return console['id']
- @exception.wrap_exception
+ @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
try:
console = self.db.console_get(context, console_id)
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
index acecc1075..0b5ce4a49 100644
--- a/nova/console/vmrc_manager.py
+++ b/nova/console/vmrc_manager.py
@@ -77,7 +77,7 @@ class ConsoleVMRCManager(manager.Manager):
self.driver.setup_console(context, console)
return console
- @exception.wrap_exception
+ @exception.wrap_exception()
def add_console(self, context, instance_id, password=None,
port=None, **kwargs):
"""Adds a console for the instance.
@@ -107,7 +107,7 @@ class ConsoleVMRCManager(manager.Manager):
instance)
return console['id']
- @exception.wrap_exception
+ @exception.wrap_exception()
def remove_console(self, context, console_id, **_kwargs):
"""Removes a console entry."""
try:
diff --git a/nova/exception.py b/nova/exception.py
index 988940d6a..ad6c005f8 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -24,8 +24,9 @@ SHOULD include dedicated exception logging.
"""
-from nova import log as logging
+from functools import wraps
+from nova import log as logging
LOG = logging.getLogger('nova.exception')
@@ -81,19 +82,49 @@ def wrap_db_error(f):
_wrap.func_name = f.func_name
-def wrap_exception(f):
- def _wrap(*args, **kw):
- try:
- return f(*args, **kw)
- except Exception, e:
- if not isinstance(e, Error):
- #exc_type, exc_value, exc_traceback = sys.exc_info()
- LOG.exception(_('Uncaught exception'))
- #logging.error(traceback.extract_stack(exc_traceback))
- raise Error(str(e))
- raise
- _wrap.func_name = f.func_name
- return _wrap
+def wrap_exception(notifier=None, publisher_id=None, event_type=None,
+ level=None):
+ """This decorator wraps a method to catch any exceptions that may
+ get thrown. It logs the exception as well as optionally sending
+ it to the notification system.
+ """
+ # TODO(sandy): Find a way to import nova.notifier.api so we don't have
+ # to pass it in as a parameter. Otherwise we get a cyclic import of
+ # nova.notifier.api -> nova.utils -> nova.exception :(
+ def inner(f):
+ def wrapped(*args, **kw):
+ try:
+ return f(*args, **kw)
+ except Exception, e:
+ if notifier:
+ payload = dict(args=args, exception=e)
+ payload.update(kw)
+
+ # Use temp vars so we don't shadow
+ # our outer definitions.
+ temp_level = level
+ if not temp_level:
+ temp_level = notifier.ERROR
+
+ temp_type = event_type
+ if not temp_type:
+ # If f has multiple decorators, they must use
+ # functools.wraps to ensure the name is
+ # propagated.
+ temp_type = f.__name__
+
+ notifier.notify(publisher_id, temp_type, temp_level,
+ payload)
+
+ if not isinstance(e, Error):
+ #exc_type, exc_value, exc_traceback = sys.exc_info()
+ LOG.exception(_('Uncaught exception'))
+ #logging.error(traceback.extract_stack(exc_traceback))
+ raise Error(str(e))
+ raise
+
+ return wraps(f)(wrapped)
+ return inner
class NovaException(Exception):
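
Usage sketch of the rewritten decorator, mirroring the compute manager changes above. With no arguments it only logs; given a notifier module and publisher_id it also emits an event named after the wrapped function (unless event_type overrides it):

    from nova import exception
    from nova.notifier import api as notifier

    @exception.wrap_exception(notifier=notifier,
                              publisher_id=notifier.publisher_id('compute'))
    def reboot_instance(context, instance_id):
        raise ValueError('boom')
    # -> logged as 'Uncaught exception', notified with event_type
    #    'reboot_instance' at notifier.ERROR, then re-raised as Error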
diff --git a/nova/network/manager.py b/nova/network/manager.py
index f17c86524..24736f53d 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -128,6 +128,7 @@ class RPCAllocateFixedIP(object):
"""Calls allocate_fixed_ip once for each network."""
green_pool = greenpool.GreenPool()
+ vpn = kwargs.pop('vpn')
for network in networks:
if network['host'] != self.host:
# need to call allocate_fixed_ip to correct network host
@@ -136,15 +137,14 @@ class RPCAllocateFixedIP(object):
args = {}
args['instance_id'] = instance_id
args['network_id'] = network['id']
- args['vpn'] = kwargs.pop('vpn')
+ args['vpn'] = vpn
green_pool.spawn_n(rpc.call, context, topic,
{'method': '_rpc_allocate_fixed_ip',
'args': args})
else:
# i am the correct host, run here
- self.allocate_fixed_ip(context, instance_id, network,
- vpn=kwargs.pop('vpn'))
+ self.allocate_fixed_ip(context, instance_id, network, vpn=vpn)
# wait for all of the allocates (if any) to finish
green_pool.waitall()
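
The hunk above hoists kwargs.pop('vpn') out of the loop. Popping inside the loop body consumes the key on the first network and raises KeyError on the second, as this two-line sketch shows:

    kwargs = {'vpn': True}
    for network in ['net-a', 'net-b']:
        vpn = kwargs.pop('vpn')  # KeyError once 'net-a' consumed the key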
@@ -340,7 +340,7 @@ class NetworkManager(manager.SchedulerDependentManager):
"""Set the network hosts for any networks which are unset."""
try:
networks = self.db.network_get_all(context)
- except Exception.NoNetworksFound:
+ except exception.NoNetworksFound:
# we don't care if no networks are found
pass
@@ -357,7 +357,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# a non-vlan instance should connect to
try:
networks = self.db.network_get_all(context)
- except Exception.NoNetworksFound:
+ except exception.NoNetworksFound:
# we don't care if no networks are found
pass
@@ -706,7 +706,7 @@ class FlatManager(NetworkManager):
timeout_fixed_ips = False
- def _allocate_fixed_ips(self, context, instance_id, networks):
+ def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs):
"""Calls allocate_fixed_ip once for each network."""
for network in networks:
self.allocate_fixed_ip(context, instance_id, network)
@@ -763,7 +763,7 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager):
self.driver.ensure_bridge(network['bridge'],
network['bridge_interface'])
- def allocate_fixed_ip(self, context, instance_id, network):
+ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Allocate flat_network fixed_ip, then setup dhcp for this network."""
address = super(FlatDHCPManager, self).allocate_fixed_ip(context,
instance_id,
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index d49517c8b..98969fd3e 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -17,7 +17,9 @@ import uuid
from nova import flags
from nova import utils
+from nova import log as logging
+LOG = logging.getLogger('nova.notifier.api')
FLAGS = flags.FLAGS
@@ -37,6 +39,12 @@ class BadPriorityException(Exception):
pass
+def publisher_id(service, host=None):
+ if not host:
+ host = FLAGS.host
+ return "%s.%s" % (service, host)
+
+
def notify(publisher_id, event_type, priority, payload):
"""
Sends a notification using the specified driver
@@ -79,4 +87,8 @@ def notify(publisher_id, event_type, priority, payload):
priority=priority,
payload=payload,
timestamp=str(utils.utcnow()))
- driver.notify(msg)
+ try:
+ driver.notify(msg)
+ except Exception, e:
+ LOG.exception(_("Problem '%(e)s' attempting to "
+ "send to notification system.") % locals())
diff --git a/nova/rpc.py b/nova/rpc.py
index f52f377b0..e2771ca88 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -219,7 +219,7 @@ class AdapterConsumer(Consumer):
return
self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args)
- @exception.wrap_exception
+ @exception.wrap_exception()
def _process_data(self, msg_id, ctxt, method, args):
"""Thread that maigcally looks for a method on the proxy
object and calls it.
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index d4a30255d..1bfa7740a 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -31,6 +31,7 @@ from nova import rpc
from nova import utils
from nova.compute import power_state
+
FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
'maximum time since last checkin for up service')
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 29cb8b944..4c4d03995 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -190,3 +190,60 @@ class PaginationParamsTest(test.TestCase):
req = Request.blank('/?limit=20&marker=40')
self.assertEqual(common.get_pagination_params(req),
{'marker': 40, 'limit': 20})
+
+
+class MiscFunctionsTest(test.TestCase):
+
+ def test_remove_version_from_href(self):
+ fixture = 'http://www.testsite.com/v1.1/images'
+ expected = 'http://www.testsite.com/images'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_2(self):
+ fixture = 'http://www.testsite.com/v1.1/'
+ expected = 'http://www.testsite.com/'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_3(self):
+ fixture = 'http://www.testsite.com/v10.10'
+ expected = 'http://www.testsite.com'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_4(self):
+ fixture = 'http://www.testsite.com/v1.1/images/v10.5'
+ expected = 'http://www.testsite.com/images/v10.5'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_bad_request(self):
+ fixture = 'http://www.testsite.com/1.1/images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_remove_version_from_href_bad_request_2(self):
+ fixture = 'http://www.testsite.com/v/images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_remove_version_from_href_bad_request_3(self):
+ fixture = 'http://www.testsite.com/v1.1images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_get_id_from_href(self):
+ fixture = 'http://www.testsite.com/dir/45'
+ actual = common.get_id_from_href(fixture)
+ expected = 45
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_bad_request(self):
+ fixture = 'http://45'
+ self.assertRaises(ValueError,
+ common.get_id_from_href,
+ fixture)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index f451ee145..c1bdd6906 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -401,15 +401,27 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
href = "http://localhost/v1.1/images/124"
bookmark = "http://localhost/images/124"
+ server_href = "http://localhost/v1.1/servers/42"
+ server_bookmark = "http://localhost/servers/42"
expected_image = {
"image": {
"id": 124,
"name": "queued snapshot",
- "serverRef": "http://localhost/v1.1/servers/42",
"updated": self.NOW_API_FORMAT,
"created": self.NOW_API_FORMAT,
"status": "QUEUED",
+ 'server': {
+ 'id': 42,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
"metadata": {
"instance_ref": "http://localhost/v1.1/servers/42",
"user_id": "1",
@@ -559,10 +571,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
"links": [{
"rel": "self",
"href": href,
- },
- {
- "rel": "bookmark",
- "href": bookmark,
}],
}
self.assertTrue(test_image in response_list)
@@ -628,6 +636,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response_dict = json.loads(response.body)
response_list = response_dict["images"]
+ server_href = "http://localhost/v1.1/servers/42"
+ server_bookmark = "http://localhost/servers/42"
expected = [{
'id': 123,
@@ -652,10 +662,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
- 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
+ 'server': {
+ 'id': 42,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
"links": [{
"rel": "self",
"href": "http://localhost/v1.1/images/124",
@@ -672,11 +692,21 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
- 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
'progress': 0,
+ 'server': {
+ 'id': 42,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
"links": [{
"rel": "self",
"href": "http://localhost/v1.1/images/125",
@@ -693,10 +723,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
- 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
+ 'server': {
+ 'id': 42,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
"links": [{
"rel": "self",
"href": "http://localhost/v1.1/images/126",
@@ -713,10 +753,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
u'instance_ref': u'http://localhost/v1.1/servers/42',
u'user_id': u'1',
},
- 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
+ 'server': {
+ 'id': 42,
+ "links": [{
+ "rel": "self",
+ "href": server_href,
+ },
+ {
+ "rel": "bookmark",
+ "href": server_bookmark,
+ }],
+ },
"links": [{
"rel": "self",
"href": "http://localhost/v1.1/images/127",
@@ -1036,6 +1086,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
def test_create_image_v1_1_actual_server_ref(self):
serverRef = 'http://localhost/v1.1/servers/1'
+ serverBookmark = 'http://localhost/servers/1'
body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
@@ -1044,11 +1095,25 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
result = json.loads(response.body)
- self.assertEqual(result['image']['serverRef'], serverRef)
+ expected = {
+ 'id': 1,
+ 'links': [
+ {
+ 'rel': 'self',
+ 'href': serverRef,
+ },
+ {
+ 'rel': 'bookmark',
+ 'href': serverBookmark,
+ },
+ ]
+ }
+ self.assertEqual(result['image']['server'], expected)
def test_create_image_v1_1_actual_server_ref_port(self):
serverRef = 'http://localhost:8774/v1.1/servers/1'
+ serverBookmark = 'http://localhost:8774/servers/1'
body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
@@ -1057,7 +1122,20 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
result = json.loads(response.body)
- self.assertEqual(result['image']['serverRef'], serverRef)
+ expected = {
+ 'id': 1,
+ 'links': [
+ {
+ 'rel': 'self',
+ 'href': serverRef,
+ },
+ {
+ 'rel': 'bookmark',
+ 'href': serverBookmark,
+ },
+ ]
+ }
+ self.assertEqual(result['image']['server'], expected)
def test_create_image_v1_1_server_ref_bad_hostname(self):
@@ -1080,6 +1158,28 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
+ def test_create_image_v1_1_server_ref_missing_version(self):
+
+ serverRef = 'http://localhost/servers/1'
+ body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_image_v1_1_server_ref_missing_id(self):
+
+ serverRef = 'http://localhost/v1.1/servers'
+ body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
@classmethod
def _make_image_fixtures(cls):
image_id = 123
@@ -1128,7 +1228,9 @@ class ImageXMLSerializationTest(test.TestCase):
TIMESTAMP = "2010-10-11T10:30:22Z"
SERVER_HREF = 'http://localhost/v1.1/servers/123'
+ SERVER_BOOKMARK = 'http://localhost/servers/123'
IMAGE_HREF = 'http://localhost/v1.1/images/%s'
+ IMAGE_BOOKMARK = 'http://localhost/images/%s'
def test_show(self):
serializer = images.ImageXMLSerializer()
@@ -1139,16 +1241,32 @@ class ImageXMLSerializationTest(test.TestCase):
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
'status': 'ACTIVE',
+ 'progress': 80,
+ 'server': {
+ 'id': 1,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
'metadata': {
'key1': 'value1',
},
'links': [
{
- 'href': self.IMAGE_HREF % (1,),
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
- 'type': 'application/json',
},
],
},
@@ -1158,25 +1276,30 @@ class ImageXMLSerializationTest(test.TestCase):
actual = minidom.parseString(output.replace(" ", ""))
expected_server_href = self.SERVER_HREF
- expected_href = self.IMAGE_HREF % (1, )
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_href = self.IMAGE_HREF % 1
+ expected_bookmark = self.IMAGE_BOOKMARK % 1
expected_now = self.TIMESTAMP
expected = minidom.parseString("""
<image id="1"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
name="Image1"
- serverRef="%(expected_server_href)s"
updated="%(expected_now)s"
created="%(expected_now)s"
status="ACTIVE"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- </links>
+ progress="80">
+ <server id="1">
+ <atom:link rel="self" href="%(expected_server_href)s"/>
+ <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/>
+ </server>
<metadata>
<meta key="key1">
value1
</meta>
</metadata>
+ <atom:link href="%(expected_href)s" rel="self"/>
+ <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
</image>
""".replace(" ", "") % (locals()))
@@ -1191,14 +1314,29 @@ class ImageXMLSerializationTest(test.TestCase):
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
'status': 'ACTIVE',
+ 'server': {
+ 'id': 1,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
'metadata': {},
'links': [
{
- 'href': self.IMAGE_HREF % (1,),
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
- 'type': 'application/json',
},
],
},
@@ -1208,21 +1346,24 @@ class ImageXMLSerializationTest(test.TestCase):
actual = minidom.parseString(output.replace(" ", ""))
expected_server_href = self.SERVER_HREF
- expected_href = self.IMAGE_HREF % (1, )
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_href = self.IMAGE_HREF % 1
+ expected_bookmark = self.IMAGE_BOOKMARK % 1
expected_now = self.TIMESTAMP
expected = minidom.parseString("""
<image id="1"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
name="Image1"
- serverRef="%(expected_server_href)s"
updated="%(expected_now)s"
created="%(expected_now)s"
- status="ACTIVE"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- </links>
- <metadata />
+ status="ACTIVE">
+ <server id="1">
+ <atom:link rel="self" href="%(expected_server_href)s"/>
+ <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/>
+ </server>
+ <atom:link href="%(expected_href)s" rel="self"/>
+ <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
</image>
""".replace(" ", "") % (locals()))
@@ -1237,16 +1378,30 @@ class ImageXMLSerializationTest(test.TestCase):
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
'status': 'ACTIVE',
+ 'server': {
+ 'id': 1,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
'links': [
{
- 'href': self.IMAGE_HREF % (1,),
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
- 'type': 'application/json',
},
],
-
},
}
@@ -1254,21 +1409,76 @@ class ImageXMLSerializationTest(test.TestCase):
actual = minidom.parseString(output.replace(" ", ""))
expected_server_href = self.SERVER_HREF
- expected_href = self.IMAGE_HREF % (1, )
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_href = self.IMAGE_HREF % 1
+ expected_bookmark = self.IMAGE_BOOKMARK % 1
expected_now = self.TIMESTAMP
expected = minidom.parseString("""
<image id="1"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
name="Image1"
- serverRef="%(expected_server_href)s"
updated="%(expected_now)s"
created="%(expected_now)s"
- status="ACTIVE"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- </links>
- <metadata />
+ status="ACTIVE">
+ <server id="1">
+ <atom:link rel="self" href="%(expected_server_href)s"/>
+ <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/>
+ </server>
+ <atom:link href="%(expected_href)s" rel="self"/>
+ <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show_no_server(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_href = self.IMAGE_HREF % 1
+ expected_bookmark = self.IMAGE_BOOKMARK % 1
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ name="Image1"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <metadata>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ <atom:link href="%(expected_href)s" rel="self"/>
+ <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
</image>
""".replace(" ", "") % (locals()))
@@ -1277,70 +1487,51 @@ class ImageXMLSerializationTest(test.TestCase):
def test_index(self):
serializer = images.ImageXMLSerializer()
- fixtures = {
+ fixture = {
'images': [
{
'id': 1,
'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
- 'status': 'ACTIVE',
'links': [
{
- 'href': 'http://localhost/v1.1/images/1',
- 'rel': 'bookmark',
- 'type': 'application/json',
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
},
],
},
{
'id': 2,
- 'name': 'queued image',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
- 'status': 'QUEUED',
+ 'name': 'Image2',
'links': [
{
- 'href': 'http://localhost/v1.1/images/2',
- 'rel': 'bookmark',
- 'type': 'application/json',
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
},
],
},
- ],
+ ]
}
- output = serializer.serialize(fixtures, 'index')
+ output = serializer.serialize(fixture, 'index')
actual = minidom.parseString(output.replace(" ", ""))
- expected_serverRef = self.SERVER_HREF
+ expected_server_href = self.SERVER_HREF
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_href = self.IMAGE_HREF % 1
+ expected_bookmark = self.IMAGE_BOOKMARK % 1
+ expected_href_two = self.IMAGE_HREF % 2
+ expected_bookmark_two = self.IMAGE_BOOKMARK % 2
expected_now = self.TIMESTAMP
expected = minidom.parseString("""
- <images xmlns="http://docs.openstack.org/compute/api/v1.1">
- <image id="1"
- name="Image1"
- serverRef="%(expected_serverRef)s"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="ACTIVE">
- <links>
- <link href="http://localhost/v1.1/images/1" rel="bookmark"
- type="application/json" />
- </links>
- </image>
- <image id="2"
- name="queued image"
- serverRef="%(expected_serverRef)s"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="QUEUED">
- <links>
- <link href="http://localhost/v1.1/images/2" rel="bookmark"
- type="application/json" />
- </links>
- </image>
+ <images
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+ <image id="1" name="Image1">
+ <atom:link href="%(expected_href)s" rel="self"/>
+ </image>
+ <image id="2" name="Image2">
+ <atom:link href="%(expected_href_two)s" rel="self"/>
+ </image>
</images>
""".replace(" ", "") % (locals()))
@@ -1356,10 +1547,10 @@ class ImageXMLSerializationTest(test.TestCase):
output = serializer.serialize(fixtures, 'index')
actual = minidom.parseString(output.replace(" ", ""))
- expected_serverRef = self.SERVER_HREF
- expected_now = self.TIMESTAMP
expected = minidom.parseString("""
- <images xmlns="http://docs.openstack.org/compute/api/v1.1" />
+ <images
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom" />
""".replace(" ", "") % (locals()))
self.assertEqual(expected.toxml(), actual.toxml())
@@ -1367,84 +1558,102 @@ class ImageXMLSerializationTest(test.TestCase):
def test_detail(self):
serializer = images.ImageXMLSerializer()
- fixtures = {
+ fixture = {
'images': [
{
'id': 1,
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
'status': 'ACTIVE',
- 'metadata': {
- 'key1': 'value1',
- 'key2': 'value2',
+ 'server': {
+ 'id': 1,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
},
'links': [
{
- 'href': 'http://localhost/v1.1/images/1',
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
- 'type': 'application/json',
},
],
},
{
'id': 2,
- 'name': 'queued image',
+ 'name': 'Image2',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
- 'metadata': {},
- 'status': 'QUEUED',
+ 'status': 'SAVING',
+ 'progress': 80,
+ 'metadata': {
+ 'key1': 'value1',
+ },
'links': [
{
- 'href': 'http://localhost/v1.1/images/2',
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
'rel': 'bookmark',
- 'type': 'application/json',
},
],
},
- ],
+ ]
}
- output = serializer.serialize(fixtures, 'detail')
+ output = serializer.serialize(fixture, 'detail')
actual = minidom.parseString(output.replace(" ", ""))
- expected_serverRef = self.SERVER_HREF
+ expected_server_href = self.SERVER_HREF
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_href = self.IMAGE_HREF % 1
+ expected_bookmark = self.IMAGE_BOOKMARK % 1
+ expected_href_two = self.IMAGE_HREF % 2
+ expected_bookmark_two = self.IMAGE_BOOKMARK % 2
expected_now = self.TIMESTAMP
expected = minidom.parseString("""
- <images xmlns="http://docs.openstack.org/compute/api/v1.1">
- <image id="1"
- name="Image1"
- serverRef="%(expected_serverRef)s"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="ACTIVE">
- <links>
- <link href="http://localhost/v1.1/images/1" rel="bookmark"
- type="application/json" />
- </links>
- <metadata>
- <meta key="key2">
- value2
- </meta>
- <meta key="key1">
- value1
- </meta>
- </metadata>
- </image>
- <image id="2"
- name="queued image"
- serverRef="%(expected_serverRef)s"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="QUEUED">
- <links>
- <link href="http://localhost/v1.1/images/2" rel="bookmark"
- type="application/json" />
- </links>
- <metadata />
- </image>
+ <images
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+ <image id="1"
+ name="Image1"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <server id="1">
+ <atom:link rel="self" href="%(expected_server_href)s"/>
+ <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/>
+ </server>
+ <atom:link href="%(expected_href)s" rel="self"/>
+ <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
+ </image>
+ <image id="2"
+ name="Image2"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="SAVING"
+ progress="80">
+ <metadata>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ <atom:link href="%(expected_href_two)s" rel="self"/>
+ <atom:link href="%(expected_bookmark_two)s" rel="bookmark"/>
+ </image>
</images>
""".replace(" ", "") % (locals()))
@@ -1459,16 +1668,32 @@ class ImageXMLSerializationTest(test.TestCase):
'name': 'Image1',
'created': self.TIMESTAMP,
'updated': self.TIMESTAMP,
- 'serverRef': self.SERVER_HREF,
- 'status': 'ACTIVE',
+ 'status': 'SAVING',
+ 'progress': 80,
+ 'server': {
+ 'id': 1,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
'metadata': {
'key1': 'value1',
},
'links': [
{
- 'href': self.IMAGE_HREF % (1,),
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
'rel': 'bookmark',
- 'type': 'application/json',
},
],
},
@@ -1478,25 +1703,30 @@ class ImageXMLSerializationTest(test.TestCase):
actual = minidom.parseString(output.replace(" ", ""))
expected_server_href = self.SERVER_HREF
- expected_href = self.IMAGE_HREF % (1, )
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_href = self.IMAGE_HREF % 1
+ expected_bookmark = self.IMAGE_BOOKMARK % 1
expected_now = self.TIMESTAMP
expected = minidom.parseString("""
<image id="1"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
name="Image1"
- serverRef="%(expected_server_href)s"
updated="%(expected_now)s"
created="%(expected_now)s"
- status="ACTIVE"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- </links>
+ status="SAVING"
+ progress="80">
+ <server id="1">
+ <atom:link rel="self" href="%(expected_server_href)s"/>
+ <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/>
+ </server>
<metadata>
<meta key="key1">
value1
</meta>
</metadata>
+ <atom:link href="%(expected_href)s" rel="self"/>
+ <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
</image>
""".replace(" ", "") % (locals()))
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
index 4d3b9cc73..cd74f8871 100644
--- a/nova/tests/test_exception.py
+++ b/nova/tests/test_exception.py
@@ -32,3 +32,66 @@ class ApiErrorTestCase(test.TestCase):
self.assertEqual(err.__str__(), 'blah code: fake error')
self.assertEqual(err.code, 'blah code')
self.assertEqual(err.msg, 'fake error')
+
+
+class FakeNotifier(object):
+ """Acts like the nova.notifier.api module."""
+ ERROR = 88
+
+ def __init__(self):
+ self.provided_publisher = None
+ self.provided_event = None
+ self.provided_priority = None
+ self.provided_payload = None
+
+ def notify(self, publisher, event, priority, payload):
+ self.provided_publisher = publisher
+ self.provided_event = event
+ self.provided_priority = priority
+ self.provided_payload = payload
+
+
+def good_function():
+ return 99
+
+
+def bad_function_error():
+ raise exception.Error()
+
+
+def bad_function_exception():
+ raise Exception()
+
+
+class WrapExceptionTestCase(test.TestCase):
+ def test_wrap_exception_good_return(self):
+ wrapped = exception.wrap_exception()
+ self.assertEquals(99, wrapped(good_function)())
+
+ def test_wrap_exception_throws_error(self):
+ wrapped = exception.wrap_exception()
+ self.assertRaises(exception.Error, wrapped(bad_function_error))
+
+ def test_wrap_exception_throws_exception(self):
+ wrapped = exception.wrap_exception()
+ # Note that a bare Exception is converted to exception.Error
+ self.assertRaises(exception.Error, wrapped(bad_function_exception))
+
+ def test_wrap_exception_with_notifier(self):
+ notifier = FakeNotifier()
+ wrapped = exception.wrap_exception(notifier, "publisher", "event",
+ "level")
+ self.assertRaises(exception.Error, wrapped(bad_function_exception))
+ self.assertEquals(notifier.provided_publisher, "publisher")
+ self.assertEquals(notifier.provided_event, "event")
+ self.assertEquals(notifier.provided_priority, "level")
+ for key in ['exception', 'args']:
+ self.assertTrue(key in notifier.provided_payload.keys())
+
+ def test_wrap_exception_with_notifier_defaults(self):
+ notifier = FakeNotifier()
+ wrapped = exception.wrap_exception(notifier)
+ self.assertRaises(exception.Error, wrapped(bad_function_exception))
+ self.assertEquals(notifier.provided_publisher, None)
+ self.assertEquals(notifier.provided_event, "bad_function_exception")
+ self.assertEquals(notifier.provided_priority, notifier.ERROR)
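Taken together, these tests pin down the contract of the reworked wrap_exception: it is now a decorator factory rather than a bare decorator, anything that is not already an exception.Error is re-raised as one, and when a notifier is supplied the event type defaults to the wrapped function's name and the priority to notifier.ERROR, with 'exception' and 'args' in the payload. A hedged sketch of a decorator satisfying exactly these tests (nova's real implementation in nova/exception.py differs in detail, e.g. traceback handling):

    import functools

    class Error(Exception):
        """Stand-in for nova.exception.Error."""

    def wrap_exception(notifier=None, publisher_id=None, event_type=None,
                       level=None):
        def outer(f):
            @functools.wraps(f)
            def inner(*args, **kwargs):
                try:
                    return f(*args, **kwargs)
                except Exception as exc:
                    if notifier:
                        payload = dict(exception=exc, args=args)
                        # Defaults match the tests: the event type falls back
                        # to the function name, priority to notifier.ERROR.
                        notifier.notify(publisher_id,
                                        event_type or f.__name__,
                                        level or notifier.ERROR,
                                        payload)
                    if isinstance(exc, Error):
                        raise
                    # Anything else is converted to an Error.
                    raise Error(str(exc))
            return inner
        return outer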
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index e912c2bec..977bb7dfe 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -331,7 +331,7 @@ class LibvirtConnection(driver.ComputeDriver):
if os.path.exists(target):
shutil.rmtree(target)
- @exception.wrap_exception
+ @exception.wrap_exception()
def attach_volume(self, instance_name, device_path, mountpoint):
virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
@@ -375,7 +375,7 @@ class LibvirtConnection(driver.ComputeDriver):
if doc is not None:
doc.freeDoc()
- @exception.wrap_exception
+ @exception.wrap_exception()
def detach_volume(self, instance_name, mountpoint):
virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
@@ -384,7 +384,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise exception.DiskNotFound(location=mount_device)
virt_dom.detachDevice(xml)
- @exception.wrap_exception
+ @exception.wrap_exception()
def snapshot(self, instance, image_href):
"""Create snapshot from a running VM instance.
@@ -460,7 +460,7 @@ class LibvirtConnection(driver.ComputeDriver):
# Clean up
shutil.rmtree(temp_dir)
- @exception.wrap_exception
+ @exception.wrap_exception()
def reboot(self, instance):
"""Reboot a virtual machine, given an instance reference.
@@ -501,31 +501,31 @@ class LibvirtConnection(driver.ComputeDriver):
timer = utils.LoopingCall(_wait_for_reboot)
return timer.start(interval=0.5, now=True)
- @exception.wrap_exception
+ @exception.wrap_exception()
def pause(self, instance, callback):
"""Pause VM instance"""
dom = self._lookup_by_name(instance.name)
dom.suspend()
- @exception.wrap_exception
+ @exception.wrap_exception()
def unpause(self, instance, callback):
"""Unpause paused VM instance"""
dom = self._lookup_by_name(instance.name)
dom.resume()
- @exception.wrap_exception
+ @exception.wrap_exception()
def suspend(self, instance, callback):
"""Suspend the specified instance"""
dom = self._lookup_by_name(instance.name)
dom.managedSave(0)
- @exception.wrap_exception
+ @exception.wrap_exception()
def resume(self, instance, callback):
"""resume the specified instance"""
dom = self._lookup_by_name(instance.name)
dom.create()
- @exception.wrap_exception
+ @exception.wrap_exception()
def rescue(self, instance):
"""Loads a VM using rescue images.
@@ -563,7 +563,7 @@ class LibvirtConnection(driver.ComputeDriver):
timer = utils.LoopingCall(_wait_for_rescue)
return timer.start(interval=0.5, now=True)
- @exception.wrap_exception
+ @exception.wrap_exception()
def unrescue(self, instance):
"""Reboot the VM which is being rescued back into primary images.
@@ -573,13 +573,13 @@ class LibvirtConnection(driver.ComputeDriver):
"""
self.reboot(instance)
- @exception.wrap_exception
+ @exception.wrap_exception()
def poll_rescued_instances(self, timeout):
pass
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
- @exception.wrap_exception
+ @exception.wrap_exception()
def spawn(self, instance, network_info=None, block_device_mapping=None):
xml = self.to_xml(instance, False, network_info=network_info,
block_device_mapping=block_device_mapping)
@@ -642,7 +642,7 @@ class LibvirtConnection(driver.ComputeDriver):
LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
return contents
- @exception.wrap_exception
+ @exception.wrap_exception()
def get_console_output(self, instance):
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
@@ -663,7 +663,7 @@ class LibvirtConnection(driver.ComputeDriver):
return self._dump_file(fpath)
- @exception.wrap_exception
+ @exception.wrap_exception()
def get_ajax_console(self, instance):
def get_open_port():
start_port, end_port = FLAGS.ajaxterm_portrange.split("-")
@@ -704,7 +704,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_host_ip_addr(self):
return FLAGS.my_ip
- @exception.wrap_exception
+ @exception.wrap_exception()
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
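The change repeated throughout this file follows from the factory rework exercised above: wrap_exception no longer is the decorator, it returns one, so every use site gains a call. A minimal illustration of why the parentheses matter (decorator_factory is a hypothetical stand-in):

    def decorator_factory(notifier=None):
        def decorator(f):
            def wrapper(*args, **kwargs):
                return f(*args, **kwargs)
            return wrapper
        return decorator

    @decorator_factory()   # correct: the call returns the real decorator
    def attach():
        return 'attached'

    @decorator_factory     # wrong: attach2 itself is passed as `notifier`
    def attach2():         # and the name is rebound to the inner decorator
        return 'attached'

    assert attach() == 'attached'
    # attach2() now raises TypeError: decorator() missing 1 required
    # positional argument: 'f'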
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 56718f8e8..c332c27b0 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -597,7 +597,9 @@ class VMOps(object):
# No response from the agent
return
resp_dict = json.loads(resp)
- return resp_dict['message']
+ # Some old versions of the Windows agent have a trailing \\r\\n
+ # (i.e. an escaped CRLF) for some reason. Strip that off.
+ return resp_dict['message'].replace('\\r\\n', '')
if timeout:
vm_ref = self._get_vm_opaque_ref(instance)
@@ -662,9 +664,13 @@ class VMOps(object):
# There was some sort of error; the message will contain
# a description of the error.
raise RuntimeError(resp_dict['message'])
- agent_pub = int(resp_dict['message'])
+ # Some old versions of the Windows agent have a trailing \\r\\n
+ # (i.e. an escaped CRLF) for some reason. Strip that off.
+ agent_pub = int(resp_dict['message'].replace('\\r\\n', ''))
dh.compute_shared(agent_pub)
- enc_pass = dh.encrypt(new_pass)
+ # Some old versions of the Linux and Windows agents expect a
+ # trailing \n on the password to work correctly.
+ enc_pass = dh.encrypt(new_pass + '\n')
# Send the encrypted password
password_transaction_id = str(uuid.uuid4())
password_args = {'id': password_transaction_id, 'enc_pass': enc_pass}
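Both agent quirks fixed here turn on escaping: the message field carries a literal backslash-r-backslash-n sequence (the CRLF was escaped before it reached nova, hence the doubled backslashes in the replace), while the password sent back needs a real trailing newline. A small sketch of both normalizations against a made-up agent response:

    import json

    # Hypothetical agent reply: the CRLF arrives as the four characters
    # backslash, 'r', backslash, 'n' -- not as real control characters.
    resp_dict = json.loads('{"message": "D4FC95A2\\\\r\\\\n"}')

    # Python's '\\r\\n' is that same four-character sequence, so the
    # replace() strips the escaped CRLF, not a real line break.
    agent_pub = resp_dict['message'].replace('\\r\\n', '')
    assert agent_pub == 'D4FC95A2'

    # The password handed to dh.encrypt() gets a genuine newline appended.
    new_pass = 'swordfish'
    assert (new_pass + '\n').endswith('\n')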