| author | Naveed Massjouni <naveedm9@gmail.com> | 2011-08-02 20:11:44 -0400 |
|---|---|---|
| committer | Naveed Massjouni <naveedm9@gmail.com> | 2011-08-02 20:11:44 -0400 |
| commit | 60e486ad24ae8a80fea4e2a917a7366ef10740bd | |
| tree | 41eef174f29d3bd813d25fb543056bc07e55ca88 | |
| parent | 695afaffaa4de359b306280c252f8f40a3bab5a7 | |
| parent | e2770a4558c95aa4b6e276ebe18dc580a82e6d67 | |
Merge from trunk.
80 files changed, 2007 insertions, 2822 deletions
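The most user-visible change in this merge is to the OpenStack API servers controller: `createImage` becomes a v1.1 server action, `createBackup` is added as an action when the admin API is enabled, and `migrate` moves out of the action dispatcher into its own member route. A rough sketch of the request bodies the new handlers accept, based on the hunks below; the endpoint URL, auth token, server id, and the use of the `requests` library are illustrative assumptions, not part of the change:

```python
# Illustrative only: request bodies accepted by the new server-action
# handlers in nova/api/openstack/servers.py (see the hunks below).
# API_URL, TOKEN and the server id are placeholders, not values from the diff.
import json

import requests

API_URL = "http://nova-api.example.com/v1.1"   # hypothetical endpoint
TOKEN = "example-auth-token"                   # hypothetical auth token
headers = {"Content-Type": "application/json", "X-Auth-Token": TOKEN}

# v1.1 createImage action: 'name' is required, 'metadata' is optional.
# A successful call returns 202 with a Location header for the new image.
create_image = {"createImage": {"name": "pre-upgrade-snapshot",
                                "metadata": {"reason": "pre-upgrade"}}}

# createBackup action (only routed when FLAGS.allow_admin_api is set):
# 'name', 'backup_type' and an integer 'rotation' are required.
create_backup = {"createBackup": {"name": "nightly",
                                  "backup_type": "daily",
                                  "rotation": 3}}

for body in (create_image, create_backup):
    resp = requests.post("%s/servers/42/action" % API_URL,
                         headers=headers, data=json.dumps(body))
    print("%s %s" % (resp.status_code, resp.headers.get("Location")))

# migrate is no longer an entry in the action dispatcher; it gets its own
# member route, so it is called as POST /servers/<id>/migrate.
requests.post("%s/servers/42/migrate" % API_URL, headers=headers)
```

At the same time the old snapshot/backup dispatch in `POST /images` is removed: `ControllerV10.create` keeps a plain serverId-based snapshot, while `ControllerV11.create` now returns 405 and snapshots go through the server action instead.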
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor deleted file mode 100755 index b9d4e49d7..000000000 --- a/bin/nova-instancemonitor +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" - Daemon for Nova RRD based instance resource monitoring. -""" - -import gettext -import os -import sys -from twisted.application import service - -# If ../nova/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... -possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) - -gettext.install('nova', unicode=1) - -from nova import log as logging -from nova import utils -from nova import twistd -from nova.compute import monitor - -LOG = logging.getLogger('nova.instancemonitor') - - -if __name__ == '__main__': - utils.default_flagfile() - twistd.serve(__file__) - -if __name__ == '__builtin__': - LOG.warn(_('Starting instance monitor')) - # pylint: disable=C0103 - monitor = monitor.InstanceMonitor() - - # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals below - application = service.Application('nova-instancemonitor') - monitor.setServiceParent(application) diff --git a/bin/nova-logspool b/bin/nova-logspool index 097459b12..a876f4c71 100644 --- a/bin/nova-logspool +++ b/bin/nova-logspool @@ -81,7 +81,6 @@ class LogReader(object): if level == 'ERROR': self.handle_logged_error(line) elif level == '[-]' and self.last_error: - # twisted stack trace line clean_line = " ".join(line.split(" ")[6:]) self.last_error.trace = self.last_error.trace + clean_line else: diff --git a/bin/nova-manage b/bin/nova-manage index 75d74903c..807753a2e 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -662,8 +662,9 @@ class NetworkCommands(object): # check for certain required inputs if not label: raise exception.NetworkNotCreated(req='--label') - if not fixed_range_v4: - raise exception.NetworkNotCreated(req='--fixed_range_v4') + if not (fixed_range_v4 or fixed_range_v6): + req = '--fixed_range_v4 or --fixed_range_v6' + raise exception.NetworkNotCreated(req=req) bridge = bridge or FLAGS.flat_network_bridge if not bridge: @@ -689,16 +690,6 @@ class NetworkCommands(object): if FLAGS.network_manager in interface_required: raise exception.NetworkNotCreated(req='--bridge_interface') - if FLAGS.use_ipv6: - fixed_range_v6 = fixed_range_v6 or FLAGS.fixed_range_v6 - if not fixed_range_v6: - raise exception.NetworkNotCreated(req='with use_ipv6, ' - '--fixed_range_v6') - gateway_v6 = gateway_v6 or FLAGS.gateway_v6 - if not gateway_v6: - raise 
exception.NetworkNotCreated(req='with use_ipv6, ' - '--gateway_v6') - # sanitize other input using FLAGS if necessary if not num_networks: num_networks = FLAGS.num_networks @@ -735,8 +726,8 @@ class NetworkCommands(object): def list(self): """List all created networks""" print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % ( - _('network'), - _('netmask'), + _('IPv4'), + _('IPv6'), _('start address'), _('DNS1'), _('DNS2'), @@ -745,7 +736,7 @@ class NetworkCommands(object): for network in db.network_get_all(context.get_admin_context()): print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % ( network.cidr, - network.netmask, + network.cidr_v6, network.dhcp_start, network.dns1, network.dns2, diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 1aef3a255..4d5aec445 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -18,7 +18,7 @@ # under the License. """ - Twisted daemon for nova objectstore. Supports S3 API. + Daemon for nova objectstore. Supports S3 API. """ import gettext diff --git a/contrib/nova.sh b/contrib/nova.sh index eab680580..7994e5133 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -75,7 +75,7 @@ if [ "$CMD" == "install" ]; then sudo modprobe kvm sudo /etc/init.d/libvirt-bin restart sudo modprobe nbd - sudo apt-get install -y python-twisted python-mox python-ipy python-paste + sudo apt-get install -y python-mox python-ipy python-paste sudo apt-get install -y python-migrate python-gflags python-greenlet sudo apt-get install -y python-libvirt python-libxml2 python-routes sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst index 329a465db..d99d16eaa 100644 --- a/doc/source/api/autoindex.rst +++ b/doc/source/api/autoindex.rst @@ -26,7 +26,6 @@ nova..compute.api.rst nova..compute.instance_types.rst nova..compute.manager.rst - nova..compute.monitor.rst nova..compute.power_state.rst nova..console.api.rst nova..console.fake.rst @@ -115,13 +114,11 @@ nova..tests.test_scheduler.rst nova..tests.test_service.rst nova..tests.test_test.rst - nova..tests.test_twistd.rst nova..tests.test_utils.rst nova..tests.test_virt.rst nova..tests.test_volume.rst nova..tests.test_xenapi.rst nova..tests.xenapi.stubs.rst - nova..twistd.rst nova..utils.rst nova..version.rst nova..virt.connection.rst diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst deleted file mode 100644 index a91169ecd..000000000 --- a/doc/source/api/nova..compute.monitor.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.monitor` Module -============================================================================== -.. automodule:: nova..compute.monitor - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.test_twistd.rst b/doc/source/api/nova..tests.test_twistd.rst deleted file mode 100644 index cae0c0a28..000000000 --- a/doc/source/api/nova..tests.test_twistd.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.test_twistd` Module -============================================================================== -.. automodule:: nova..tests.test_twistd - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst deleted file mode 100644 index d4145396d..000000000 --- a/doc/source/api/nova..twistd.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..twistd` Module -============================================================================== -.. 
automodule:: nova..twistd - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/code.rst b/doc/source/code.rst index 6b8d5661f..73fc31e1a 100644 --- a/doc/source/code.rst +++ b/doc/source/code.rst @@ -21,7 +21,6 @@ Generating source/api/nova..cloudpipe.pipelib.rst Generating source/api/nova..compute.disk.rst Generating source/api/nova..compute.instance_types.rst Generating source/api/nova..compute.manager.rst -Generating source/api/nova..compute.monitor.rst Generating source/api/nova..compute.power_state.rst Generating source/api/nova..context.rst Generating source/api/nova..crypto.rst @@ -79,11 +78,9 @@ Generating source/api/nova..tests.rpc_unittest.rst Generating source/api/nova..tests.runtime_flags.rst Generating source/api/nova..tests.scheduler_unittest.rst Generating source/api/nova..tests.service_unittest.rst -Generating source/api/nova..tests.twistd_unittest.rst Generating source/api/nova..tests.validator_unittest.rst Generating source/api/nova..tests.virt_unittest.rst Generating source/api/nova..tests.volume_unittest.rst -Generating source/api/nova..twistd.rst Generating source/api/nova..utils.rst Generating source/api/nova..validate.rst Generating source/api/nova..virt.connection.rst diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst index 233cd6f08..7f44ecdf2 100644 --- a/doc/source/devref/architecture.rst +++ b/doc/source/devref/architecture.rst @@ -45,7 +45,7 @@ Below you will find a helpful explanation of the different components. * Web Dashboard: potential external component that talks to the api * api: component that receives http requests, converts commands and communicates with other components via the queue or http (in the case of objectstore) * Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. -* objectstore: twisted http server that replicates s3 api and allows storage and retrieval of images +* objectstore: http server that replicates s3 api and allows storage and retrieval of images * scheduler: decides which host gets each vm and volume * volume: manages dynamically attachable block devices. * network: manages ip forwarding, bridges, and vlans diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst index 31cc2037f..50397cbec 100644 --- a/doc/source/devref/compute.rst +++ b/doc/source/devref/compute.rst @@ -118,19 +118,6 @@ The :mod:`nova.virt.fake` Driver :show-inheritance: -Monitoring ----------- - -The :mod:`nova.compute.monitor` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.compute.monitor - :noindex: - :members: - :undoc-members: - :show-inheritance: - - Tests ----- diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index f3c454d64..09f1eb2c2 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -51,7 +51,7 @@ To activate the Nova virtualenv for the extent of your current shell session Also, make test will automatically use the virtualenv. -If you don't want to create a virtualenv every time you branch (which takes a while as long as we have the large Twisted project as a dependency) you can reuse a single virtualenv for all branches. +If you don't want to create a virtualenv every time you branch you can reuse a single virtualenv for all branches. #. 
If you don't have a nova/ directory containing trunk/ and other branches, do so now. #. Go into nova/trunk and install a virtualenv. diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst index 093fbb3ee..beca99ecd 100644 --- a/doc/source/devref/nova.rst +++ b/doc/source/devref/nova.rst @@ -102,16 +102,6 @@ The :mod:`nova.test` Module :show-inheritance: -The :mod:`nova.twistd` Module ------------------------------ - -.. automodule:: nova.twistd - :noindex: - :members: - :undoc-members: - :show-inheritance: - - The :mod:`nova.utils` Module ---------------------------- @@ -215,16 +205,6 @@ The :mod:`runtime_flags` Module :show-inheritance: -The :mod:`twistd_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.twistd_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - - The :mod:`validator_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 6585f1751..d6a98c2cd 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -40,6 +40,7 @@ from nova.api.openstack import servers from nova.api.openstack import server_metadata from nova.api.openstack import shared_ip_groups from nova.api.openstack import users +from nova.api.openstack import versions from nova.api.openstack import wsgi from nova.api.openstack import zones @@ -96,6 +97,7 @@ class APIRouter(base_wsgi.Router): server_members['suspend'] = 'POST' server_members['resume'] = 'POST' server_members['rescue'] = 'POST' + server_members['migrate'] = 'POST' server_members['unrescue'] = 'POST' server_members['reset_network'] = 'POST' server_members['inject_network_info'] = 'POST' @@ -115,6 +117,10 @@ class APIRouter(base_wsgi.Router): 'select': 'POST', 'boot': 'POST'}) + mapper.connect("versions", "/", + controller=versions.create_resource(version), + action='show') + mapper.resource("console", "consoles", controller=consoles.create_resource(), parent_resource=dict(member_name='server', diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index b4a211857..3d8049324 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -27,9 +27,9 @@ from nova.api.openstack import extensions def _translate_floating_ip_view(floating_ip): result = {'id': floating_ip['id'], 'ip': floating_ip['address']} - if 'fixed_ip' in floating_ip: + try: result['fixed_ip'] = floating_ip['fixed_ip']['address'] - else: + except (TypeError, KeyError): result['fixed_ip'] = None if 'instance' in floating_ip: result['instance_id'] = floating_ip['instance']['id'] diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 694af7d61..87c5b884c 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -91,6 +91,11 @@ class CreateInstanceHelper(object): key_data = key_pair['public_key'] image_href = self.controller._image_ref_from_req_data(body) + # If the image href was generated by nova api, strip image_href + # down to an id and use the default glance connection params + + if str(image_href).startswith(req.application_url): + image_href = image_href.split('/').pop() try: image_service, image_id = nova.image.get_image_service(image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( @@ -299,6 +304,37 @@ class ServerXMLDeserializer(wsgi.XMLDeserializer): metadata_deserializer = 
common.MetadataXMLDeserializer() + def action(self, string): + dom = minidom.parseString(string) + action_node = dom.childNodes[0] + action_name = action_node.tagName + + action_deserializer = { + 'createImage': self._action_create_image, + 'createBackup': self._action_create_backup, + }.get(action_name, self.default) + + action_data = action_deserializer(action_node) + + return {'body': {action_name: action_data}} + + def _action_create_image(self, node): + return self._deserialize_image_action(node, ('name',)) + + def _action_create_backup(self, node): + attributes = ('name', 'backup_type', 'rotation') + return self._deserialize_image_action(node, attributes) + + def _deserialize_image_action(self, node, allowed_attributes): + data = {} + for attribute in allowed_attributes: + value = node.getAttribute(attribute) + if value: + data[attribute] = value + metadata_node = self.find_first_child_named(node, 'metadata') + data['metadata'] = self.extract_metadata(metadata_node) + return data + def create(self, string): """Deserialize an xml-formatted server create request""" dom = minidom.parseString(string) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 9ba8b639e..0834adfa5 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -98,79 +98,34 @@ class Controller(object): self._image_service.delete(context, id) return webob.exc.HTTPNoContent() - def create(self, req, body): - """Snapshot or backup a server instance and save the image. - - Images now have an `image_type` associated with them, which can be - 'snapshot' or the backup type, like 'daily' or 'weekly'. - - If the image_type is backup-like, then the rotation factor can be - included and that will cause the oldest backups that exceed the - rotation factor to be deleted. 
- - :param req: `wsgi.Request` object - """ - def get_param(param): - try: - return body["image"][param] - except KeyError: - raise webob.exc.HTTPBadRequest(explanation="Missing required " - "param: %s" % param) - - context = req.environ['nova.context'] - content_type = req.get_content_type() - - if not body: - raise webob.exc.HTTPBadRequest() - - image_type = body["image"].get("image_type", "snapshot") - - try: - server_id = self._server_id_from_req(req, body) - except KeyError: - raise webob.exc.HTTPBadRequest() - - image_name = get_param("name") - props = self._get_extra_properties(req, body) - - if image_type == "snapshot": - image = self._compute_service.snapshot( - context, server_id, image_name, - extra_properties=props) - elif image_type == "backup": - # NOTE(sirp): Unlike snapshot, backup is not a customer facing - # API call; rather, it's used by the internal backup scheduler - if not FLAGS.allow_admin_api: - raise webob.exc.HTTPBadRequest( - explanation="Admin API Required") - - backup_type = get_param("backup_type") - rotation = int(get_param("rotation")) - - image = self._compute_service.backup( - context, server_id, image_name, - backup_type, rotation, extra_properties=props) - else: - LOG.error(_("Invalid image_type '%s' passed") % image_type) - raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: " - "%s" % image_type) - - return dict(image=self.get_builder(req).build(image, detail=True)) - def get_builder(self, request): """Indicates that you must use a Controller subclass.""" raise NotImplementedError() - def _server_id_from_req(self, req, data): - raise NotImplementedError() - - def _get_extra_properties(self, req, data): - return {} - class ControllerV10(Controller): """Version 1.0 specific controller logic.""" + def create(self, req, body): + """Snapshot a server instance and save the image.""" + try: + image = body["image"] + except (KeyError, TypeError): + msg = _("Invalid image entity") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + image_name = image["name"] + server_id = image["serverId"] + except KeyError as missing_key: + msg = _("Image entity requires %s") % missing_key + raise webob.exc.HTTPBadRequest(explanation=msg) + + context = req.environ["nova.context"] + image = self._compute_service.snapshot(context, server_id, image_name) + + return dict(image=self.get_builder(req).build(image, detail=True)) + def get_builder(self, request): """Property to get the ViewBuilder class we need to use.""" base_url = request.application_url @@ -202,13 +157,6 @@ class ControllerV10(Controller): builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def _server_id_from_req(self, req, data): - try: - return data['image']['serverId'] - except KeyError: - msg = _("Expected serverId attribute on server entity.") - raise webob.exc.HTTPBadRequest(explanation=msg) - class ControllerV11(Controller): """Version 1.1 specific controller logic.""" @@ -246,37 +194,8 @@ class ControllerV11(Controller): builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def _server_id_from_req(self, req, data): - try: - server_ref = data['image']['serverRef'] - except KeyError: - msg = _("Expected serverRef attribute on server entity.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if not server_ref.startswith('http'): - return server_ref - - passed = urlparse.urlparse(server_ref) - expected = urlparse.urlparse(req.application_url) - version = 
expected.path.split('/')[1] - expected_prefix = "/%s/servers/" % version - _empty, _sep, server_id = passed.path.partition(expected_prefix) - scheme_ok = passed.scheme == expected.scheme - host_ok = passed.hostname == expected.hostname - port_ok = (passed.port == expected.port or - passed.port == FLAGS.osapi_port) - if not (scheme_ok and port_ok and host_ok and server_id): - msg = _("serverRef must match request url") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return server_id - - def _get_extra_properties(self, req, data): - server_ref = data['image']['serverRef'] - if not server_ref.startswith('http'): - server_ref = os.path.join(req.application_url, 'servers', - server_ref) - return {'instance_ref': server_ref} + def create(self, *args, **kwargs): + raise webob.exc.HTTPMethodNotAllowed() class ImageXMLSerializer(wsgi.XMLDictSerializer): @@ -369,12 +288,6 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer): image_dict['image']) return self.to_xml_string(node, True) - def create(self, image_dict): - xml_doc = minidom.Document() - node = self._image_to_xml_detailed(xml_doc, - image_dict['image']) - return self.to_xml_string(node, True) - def create_resource(version='1.0'): controller = { diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 30169d450..002b47edb 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -14,6 +14,7 @@ # under the License. import base64 +import os import traceback from webob import exc @@ -154,23 +155,94 @@ class Controller(object): @scheduler_api.redirect_handler def action(self, req, id, body): - """Multi-purpose method used to reboot, rebuild, or - resize a server""" + """Multi-purpose method used to take actions on a server""" - actions = { + self.actions = { 'changePassword': self._action_change_password, 'reboot': self._action_reboot, 'resize': self._action_resize, 'confirmResize': self._action_confirm_resize, 'revertResize': self._action_revert_resize, 'rebuild': self._action_rebuild, - 'migrate': self._action_migrate} + 'createImage': self._action_create_image, + } - for key in actions.keys(): + if FLAGS.allow_admin_api: + admin_actions = { + 'createBackup': self._action_create_backup, + } + self.actions.update(admin_actions) + + for key in self.actions.keys(): if key in body: - return actions[key](body, req, id) + return self.actions[key](body, req, id) + raise exc.HTTPNotImplemented() + def _action_create_backup(self, input_dict, req, instance_id): + """Backup a server instance. + + Images now have an `image_type` associated with them, which can be + 'snapshot' or the backup type, like 'daily' or 'weekly'. + + If the image_type is backup-like, then the rotation factor can be + included and that will cause the oldest backups that exceed the + rotation factor to be deleted. 
+ + """ + entity = input_dict["createBackup"] + + try: + image_name = entity["name"] + backup_type = entity["backup_type"] + rotation = entity["rotation"] + + except KeyError as missing_key: + msg = _("createBackup entity requires %s attribute") % missing_key + raise webob.exc.HTTPBadRequest(explanation=msg) + + except TypeError: + msg = _("Malformed createBackup entity") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + rotation = int(rotation) + except ValueError: + msg = _("createBackup attribute 'rotation' must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + # preserve link to server in image properties + server_ref = os.path.join(req.application_url, + 'servers', + str(instance_id)) + props = {'instance_ref': server_ref} + + metadata = entity.get('metadata', {}) + try: + props.update(metadata) + except ValueError: + msg = _("Invalid metadata") + raise webob.exc.HTTPBadRequest(explanation=msg) + + context = req.environ["nova.context"] + image = self.compute_api.backup(context, + instance_id, + image_name, + backup_type, + rotation, + extra_properties=props) + + # build location of newly-created image entity + image_id = str(image['id']) + image_ref = os.path.join(req.application_url, 'images', image_id) + + resp = webob.Response(status_int=202) + resp.headers['Location'] = image_ref + return resp + + def _action_create_image(self, input_dict, req, id): + return exc.HTTPNotImplemented() + def _action_change_password(self, input_dict, req, id): return exc.HTTPNotImplemented() @@ -208,14 +280,6 @@ class Controller(object): raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) - def _action_migrate(self, input_dict, req, id): - try: - self.compute_api.resize(req.environ['nova.context'], id) - except Exception, e: - LOG.exception(_("Error in migrate %s"), e) - raise exc.HTTPBadRequest() - return webob.Response(status_int=202) - @scheduler_api.redirect_handler def lock(self, req, id): """ @@ -342,6 +406,15 @@ class Controller(object): return webob.Response(status_int=202) @scheduler_api.redirect_handler + def migrate(self, req, id): + try: + self.compute_api.resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in migrate %s"), e) + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) + + @scheduler_api.redirect_handler def rescue(self, req, id): """Permit users to rescue the server.""" context = req.environ["nova.context"] @@ -405,6 +478,24 @@ class Controller(object): error=item.error)) return dict(actions=actions) + def resize(self, req, instance_id, flavor_id): + """Begin the resize process with given instance/flavor.""" + context = req.environ["nova.context"] + + try: + self.compute_api.resize(context, instance_id, flavor_id) + except exception.FlavorNotFound: + msg = _("Unable to locate requested flavor.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.CannotResizeToSameSize: + msg = _("Resize requires a change in size.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.CannotResizeToSmallerSize: + msg = _("Resizing to a smaller size is not supported.") + raise exc.HTTPBadRequest(explanation=msg) + + return webob.Response(status_int=202) + class ControllerV10(Controller): @@ -444,16 +535,7 @@ class ControllerV10(Controller): msg = _("Resize requests require 'flavorId' attribute.") raise exc.HTTPBadRequest(explanation=msg) - try: - i_type = instance_types.get_instance_type_by_flavor_id(flavor_id) - except exception.FlavorNotFound: - msg = _("Unable to locate 
requested flavor.") - raise exc.HTTPBadRequest(explanation=msg) - - context = req.environ["nova.context"] - self.compute_api.resize(context, id, i_type["id"]) - - return webob.Response(status_int=202) + return self.resize(req, id, flavor_id) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -568,16 +650,7 @@ class ControllerV11(Controller): msg = _("Resize requests require 'flavorRef' attribute.") raise exc.HTTPBadRequest(explanation=msg) - try: - i_type = instance_types.get_instance_type_by_flavor_id(flavor_ref) - except exception.FlavorNotFound: - msg = _("Unable to locate requested flavor.") - raise exc.HTTPBadRequest(explanation=msg) - - context = req.environ["nova.context"] - self.compute_api.resize(context, id, i_type["id"]) - - return webob.Response(status_int=202) + return self.resize(req, id, flavor_ref) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -607,6 +680,48 @@ class ControllerV11(Controller): return webob.Response(status_int=202) + def _action_create_image(self, input_dict, req, instance_id): + """Snapshot a server instance.""" + entity = input_dict.get("createImage", {}) + + try: + image_name = entity["name"] + + except KeyError: + msg = _("createImage entity requires name attribute") + raise webob.exc.HTTPBadRequest(explanation=msg) + + except TypeError: + msg = _("Malformed createImage entity") + raise webob.exc.HTTPBadRequest(explanation=msg) + + # preserve link to server in image properties + server_ref = os.path.join(req.application_url, + 'servers', + str(instance_id)) + props = {'instance_ref': server_ref} + + metadata = entity.get('metadata', {}) + try: + props.update(metadata) + except ValueError: + msg = _("Invalid metadata") + raise webob.exc.HTTPBadRequest(explanation=msg) + + context = req.environ['nova.context'] + image = self.compute_api.snapshot(context, + instance_id, + image_name, + extra_properties=props) + + # build location of newly-created image entity + image_id = str(image['id']) + image_ref = os.path.join(req.application_url, 'images', image_id) + + resp = webob.Response(status_int=202) + resp.headers['Location'] = image_ref + return resp + def get_default_xmlns(self, req): return common.XML_NS_V11 diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index df7a94b7e..3ef72b7f6 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -24,7 +24,66 @@ import nova.api.openstack.views.versions from nova.api.openstack import wsgi -ATOM_XMLNS = "http://www.w3.org/2005/Atom" +VERSIONS = { + "v1.0": { + "id": "v1.0", + "status": "DEPRECATED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/application.wadl" + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json" + } + ], + }, + "v1.1": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": 
"application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl" + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json" + } + ], + }, +} class Versions(wsgi.Resource): @@ -36,16 +95,20 @@ class Versions(wsgi.Resource): } } + headers_serializer = VersionsHeadersSerializer() + body_serializers = { 'application/atom+xml': VersionsAtomSerializer(metadata=metadata), 'application/xml': VersionsXMLSerializer(metadata=metadata), } - serializer = wsgi.ResponseSerializer(body_serializers) + serializer = wsgi.ResponseSerializer( + body_serializers=body_serializers, + headers_serializer=headers_serializer) supported_content_types = ('application/json', 'application/xml', 'application/atom+xml') - deserializer = wsgi.RequestDeserializer( + deserializer = VersionsRequestDeserializer( supported_content_types=supported_content_types) wsgi.Resource.__init__(self, None, serializer=serializer, @@ -53,60 +116,131 @@ class Versions(wsgi.Resource): def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" - version_objs = [ - { - "id": "v1.1", - "status": "CURRENT", - #TODO(wwolf) get correct value for these - "updated": "2011-07-18T11:30:00Z", - }, - { - "id": "v1.0", - "status": "DEPRECATED", - #TODO(wwolf) get correct value for these - "updated": "2010-10-09T11:30:00Z", - }, - ] - builder = nova.api.openstack.views.versions.get_view_builder(request) - versions = [builder.build(version) for version in version_objs] - return dict(versions=versions) + if request.path == '/': + # List Versions + return builder.build_versions(VERSIONS) + else: + # Versions Multiple Choice + return builder.build_choices(VERSIONS, request) + + +class VersionV10(object): + def show(self, req): + builder = nova.api.openstack.views.versions.get_view_builder(req) + return builder.build_version(VERSIONS['v1.0']) + + +class VersionV11(object): + def show(self, req): + builder = nova.api.openstack.views.versions.get_view_builder(req) + return builder.build_version(VERSIONS['v1.1']) + + +class VersionsRequestDeserializer(wsgi.RequestDeserializer): + def get_expected_content_type(self, request): + supported_content_types = list(self.supported_content_types) + if request.path != '/': + # Remove atom+xml accept type for 300 responses + if 'application/atom+xml' in supported_content_types: + supported_content_types.remove('application/atom+xml') + + return request.best_match_content_type(supported_content_types) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + args = {} + if request_environment['PATH_INFO'] == '/': + args['action'] = 'index' + else: + args['action'] = 'multi' + + return args class VersionsXMLSerializer(wsgi.XMLDictSerializer): - def _versions_to_xml(self, versions): - root = self._xml_doc.createElement('versions') + #TODO(wwolf): this is temporary until we get rid of toprettyxml + # in the base class (XMLDictSerializer), which I plan to do in + # another branch + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml(encoding='UTF-8') + + def _versions_to_xml(self, versions, name="versions", xmlns=None): + root = self._xml_doc.createElement(name) + root.setAttribute("xmlns", wsgi.XMLNS_V11) + root.setAttribute("xmlns:atom", wsgi.XMLNS_ATOM) for version in versions: 
root.appendChild(self._create_version_node(version)) return root - def _create_version_node(self, version): + def _create_media_types(self, media_types): + base = self._xml_doc.createElement('media-types') + for type in media_types: + node = self._xml_doc.createElement('media-type') + node.setAttribute('base', type['base']) + node.setAttribute('type', type['type']) + base.appendChild(node) + + return base + + def _create_version_node(self, version, create_ns=False): version_node = self._xml_doc.createElement('version') + if create_ns: + xmlns = wsgi.XMLNS_V11 + xmlns_atom = wsgi.XMLNS_ATOM + version_node.setAttribute('xmlns', xmlns) + version_node.setAttribute('xmlns:atom', xmlns_atom) + version_node.setAttribute('id', version['id']) version_node.setAttribute('status', version['status']) - version_node.setAttribute('updated', version['updated']) + if 'updated' in version: + version_node.setAttribute('updated', version['updated']) + + if 'media-types' in version: + media_types = self._create_media_types(version['media-types']) + version_node.appendChild(media_types) - for link in version['links']: - link_node = self._xml_doc.createElement('atom:link') - link_node.setAttribute('rel', link['rel']) - link_node.setAttribute('href', link['href']) - version_node.appendChild(link_node) + link_nodes = self._create_link_nodes(self._xml_doc, version['links']) + for link in link_nodes: + version_node.appendChild(link) return version_node - def default(self, data): + def index(self, data): self._xml_doc = minidom.Document() node = self._versions_to_xml(data['versions']) return self.to_xml_string(node) + def show(self, data): + self._xml_doc = minidom.Document() + node = self._create_version_node(data['version'], True) + + return self.to_xml_string(node) + + def multi(self, data): + self._xml_doc = minidom.Document() + node = self._versions_to_xml(data['choices'], 'choices', + xmlns=wsgi.XMLNS_V11) + + return self.to_xml_string(node) + class VersionsAtomSerializer(wsgi.XMLDictSerializer): + #TODO(wwolf): this is temporary until we get rid of toprettyxml + # in the base class (XMLDictSerializer), which I plan to do in + # another branch + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml(encoding='UTF-8') + def __init__(self, metadata=None, xmlns=None): + self.metadata = metadata or {} if not xmlns: - self.xmlns = ATOM_XMLNS + self.xmlns = wsgi.XMLNS_ATOM else: self.xmlns = xmlns @@ -135,8 +269,33 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): link_href = link_href.rstrip('/') return link_href.rsplit('/', 1)[0] + '/' - def _create_meta(self, root, versions): - title = self._create_text_elem('title', 'Available API Versions', + def _create_detail_meta(self, root, version): + title = self._create_text_elem('title', "About This Version", + type='text') + + updated = self._create_text_elem('updated', version['updated']) + + uri = version['links'][0]['href'] + id = self._create_text_elem('id', uri) + + link = self._xml_doc.createElement('link') + link.setAttribute('rel', 'self') + link.setAttribute('href', uri) + + author = self._xml_doc.createElement('author') + author_name = self._create_text_elem('name', 'Rackspace') + author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/') + author.appendChild(author_name) + author.appendChild(author_uri) + + root.appendChild(title) + root.appendChild(updated) + root.appendChild(id) + root.appendChild(author) + root.appendChild(link) + + def _create_list_meta(self, root, versions): + title = 
self._create_text_elem('title', "Available API Versions", type='text') # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) @@ -144,6 +303,7 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): base_url = self._get_base_url(versions[0]['links'][0]['href']) id = self._create_text_elem('id', base_url) + link = self._xml_doc.createElement('link') link.setAttribute('rel', 'self') link.setAttribute('href', base_url) @@ -178,7 +338,10 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): link_node = self._xml_doc.createElement('link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) - entry.appendChild(link_node) + if 'type' in link: + link_node.setAttribute('type', link['type']) + + entry.appendChild(link_node) content = self._create_text_elem('content', 'Version %s %s (%s)' % @@ -190,10 +353,45 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): entry.appendChild(content) root.appendChild(entry) - def default(self, data): + def index(self, data): self._xml_doc = minidom.Document() node = self._xml_doc.createElementNS(self.xmlns, 'feed') - self._create_meta(node, data['versions']) + self._create_list_meta(node, data['versions']) self._create_version_entries(node, data['versions']) return self.to_xml_string(node) + + def show(self, data): + self._xml_doc = minidom.Document() + node = self._xml_doc.createElementNS(self.xmlns, 'feed') + self._create_detail_meta(node, data['version']) + self._create_version_entries(node, [data['version']]) + + return self.to_xml_string(node) + + +class VersionsHeadersSerializer(wsgi.ResponseHeadersSerializer): + def multi(self, response, data): + response.status_int = 300 + + +def create_resource(version='1.0'): + controller = { + '1.0': VersionV10, + '1.1': VersionV11, + }[version]() + + body_serializers = { + 'application/xml': VersionsXMLSerializer(), + 'application/atom+xml': VersionsAtomSerializer(), + } + serializer = wsgi.ResponseSerializer(body_serializers) + + supported_content_types = ('application/json', + 'application/xml', + 'application/atom+xml') + deserializer = wsgi.RequestDeserializer( + supported_content_types=supported_content_types) + + return wsgi.Resource(controller, serializer=serializer, + deserializer=deserializer) diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py index 9fa8f49dc..547289034 100644 --- a/nova/api/openstack/views/versions.py +++ b/nova/api/openstack/views/versions.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import copy import os @@ -31,16 +32,44 @@ class ViewBuilder(object): """ self.base_url = base_url - def build(self, version_data): - """Generic method used to generate a version entity.""" - version = { - "id": version_data["id"], - "status": version_data["status"], - "updated": version_data["updated"], - "links": self._build_links(version_data), - } + def build_choices(self, VERSIONS, req): + version_objs = [] + for version in VERSIONS: + version = VERSIONS[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "links": [ + { + "rel": "self", + "href": self.generate_href(version['id'], req.path) + } + ], + "media-types": version['media-types'] + }) - return version + return dict(choices=version_objs) + + def build_versions(self, versions): + version_objs = [] + for version in versions: + version = versions[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "updated": version['updated'], + "links": self._build_links(version), + }) + + return dict(versions=version_objs) + + def build_version(self, version): + reval = copy.deepcopy(version) + reval['links'].insert(0, { + "rel": "self", + "href": self.base_url.rstrip('/') + '/', + }) + return dict(version=reval) def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" @@ -55,6 +84,11 @@ class ViewBuilder(object): return links - def generate_href(self, version_number): + def generate_href(self, version_number, path=None): """Create an url that refers to a specific version_number.""" - return os.path.join(self.base_url, version_number) + '/' + version_number = version_number.strip('/') + if path: + path = path.strip('/') + return os.path.join(self.base_url, version_number, path) + else: + return os.path.join(self.base_url, version_number) + '/' diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 53dab22e8..0eb47044e 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -13,6 +13,7 @@ from nova import wsgi XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' + XMLNS_ATOM = 'http://www.w3.org/2005/Atom' LOG = logging.getLogger('nova.api.openstack.wsgi') @@ -386,6 +387,8 @@ class XMLDictSerializer(DictSerializer): link_node = xml_doc.createElement('atom:link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) + if 'type' in link: + link_node.setAttribute('type', link['type']) link_nodes.append(link_node) return link_nodes diff --git a/nova/compute/api.py b/nova/compute/api.py index 8f7b3c3ef..aae16d1da 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -940,18 +940,15 @@ class API(base.Base): LOG.debug(_("Old instance type %(current_instance_type_name)s, " " new instance type %(new_instance_type_name)s") % locals()) if not new_instance_type: - raise exception.ApiError(_("Requested flavor %(flavor_id)d " - "does not exist") % locals()) + raise exception.FlavorNotFound(flavor_id=flavor_id) current_memory_mb = current_instance_type['memory_mb'] new_memory_mb = new_instance_type['memory_mb'] if current_memory_mb > new_memory_mb: - raise exception.ApiError(_("Invalid flavor: cannot downsize" - "instances")) + raise exception.CannotResizeToSmallerSize() if (current_memory_mb == new_memory_mb) and flavor_id: - raise exception.ApiError(_("Invalid flavor: cannot use" - "the same flavor. 
")) + raise exception.CannotResizeToSameSize() instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index a2d84cd76..cf4ee229d 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -326,7 +326,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) try: - self.driver.spawn(instance, network_info, bd_mapping) + self.driver.spawn(context, instance, network_info, bd_mapping) except Exception as ex: # pylint: disable=W0702 msg = _("Instance '%(instance_id)s' failed to spawn. Is " "virtualization enabled in the BIOS? Details: " @@ -433,7 +433,7 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self.network_api.get_instance_nw_info(context, instance_ref) bd_mapping = self._setup_block_device_mapping(context, instance_id) - self.driver.spawn(instance_ref, network_info, bd_mapping) + self.driver.spawn(context, instance_ref, network_info, bd_mapping) self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) @@ -501,7 +501,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'instance: %(instance_id)s (state: %(state)s ' 'expected: %(running)s)') % locals()) - self.driver.snapshot(instance_ref, image_id) + self.driver.snapshot(context, instance_ref, image_id) if image_type == 'snapshot': if rotation: @@ -660,7 +660,7 @@ class ComputeManager(manager.SchedulerDependentManager): _update_state = lambda result: self._update_state_callback( self, context, instance_id, result) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.rescue(instance_ref, _update_state, network_info) + self.driver.rescue(context, instance_ref, _update_state, network_info) self._update_state(context, instance_id) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -863,8 +863,8 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref.uuid) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.finish_migration(instance_ref, disk_info, network_info, - resize_instance) + self.driver.finish_migration(context, instance_ref, disk_info, + network_info, resize_instance) self.db.migration_update(context, migration_id, {'status': 'finished', }) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py deleted file mode 100644 index 9d8e2a25d..000000000 --- a/nova/compute/monitor.py +++ /dev/null @@ -1,435 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Instance Monitoring: - - Optionally may be run on each compute node. Provides RRD - based statistics and graphs and makes them internally available - in the object store. 
-""" - -import datetime -import os -import time - -import boto -import boto.s3 -import rrdtool -from twisted.internet import task -from twisted.application import service - -from nova import flags -from nova import log as logging -from nova import utils -from nova.virt import connection as virt_connection - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('monitoring_instances_delay', 5, - 'Sleep time between updates') -flags.DEFINE_integer('monitoring_instances_step', 300, - 'Interval of RRD updates') -flags.DEFINE_string('monitoring_rrd_path', '$state_path/monitor/instances', - 'Location of RRD files') - - -RRD_VALUES = { - 'cpu': [ - 'DS:cpu:GAUGE:600:0:100', - 'RRA:AVERAGE:0.5:1:800', - 'RRA:AVERAGE:0.5:6:800', - 'RRA:AVERAGE:0.5:24:800', - 'RRA:AVERAGE:0.5:288:800', - 'RRA:MAX:0.5:1:800', - 'RRA:MAX:0.5:6:800', - 'RRA:MAX:0.5:24:800', - 'RRA:MAX:0.5:288:800', - ], - 'net': [ - 'DS:rx:COUNTER:600:0:1250000', - 'DS:tx:COUNTER:600:0:1250000', - 'RRA:AVERAGE:0.5:1:800', - 'RRA:AVERAGE:0.5:6:800', - 'RRA:AVERAGE:0.5:24:800', - 'RRA:AVERAGE:0.5:288:800', - 'RRA:MAX:0.5:1:800', - 'RRA:MAX:0.5:6:800', - 'RRA:MAX:0.5:24:800', - 'RRA:MAX:0.5:288:800', - ], - 'disk': [ - 'DS:rd:COUNTER:600:U:U', - 'DS:wr:COUNTER:600:U:U', - 'RRA:AVERAGE:0.5:1:800', - 'RRA:AVERAGE:0.5:6:800', - 'RRA:AVERAGE:0.5:24:800', - 'RRA:AVERAGE:0.5:288:800', - 'RRA:MAX:0.5:1:800', - 'RRA:MAX:0.5:6:800', - 'RRA:MAX:0.5:24:800', - 'RRA:MAX:0.5:444:800', - ]} - - -utcnow = utils.utcnow - - -LOG = logging.getLogger('nova.compute.monitor') - - -def update_rrd(instance, name, data): - """ - Updates the specified RRD file. - """ - filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name) - - if not os.path.exists(filename): - init_rrd(instance, name) - - timestamp = int(time.mktime(utcnow().timetuple())) - rrdtool.update(filename, '%d:%s' % (timestamp, data)) - - -def init_rrd(instance, name): - """ - Initializes the specified RRD file. - """ - path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id) - - if not os.path.exists(path): - os.makedirs(path) - - filename = os.path.join(path, '%s.rrd' % name) - - if not os.path.exists(filename): - rrdtool.create( - filename, - '--step', '%d' % FLAGS.monitoring_instances_step, - '--start', '0', - *RRD_VALUES[name]) - - -def graph_cpu(instance, duration): - """ - Creates a graph of cpu usage for the specified instance and duration. - """ - path = instance.get_rrd_path() - filename = os.path.join(path, 'cpu-%s.png' % duration) - - rrdtool.graph( - filename, - '--disable-rrdtool-tag', - '--imgformat', 'PNG', - '--width', '400', - '--height', '120', - '--start', 'now-%s' % duration, - '--vertical-label', '% cpu used', - '-l', '0', - '-u', '100', - 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'), - 'AREA:cpu#eacc00:% CPU',) - - store_graph(instance.instance_id, filename) - - -def graph_net(instance, duration): - """ - Creates a graph of network usage for the specified instance and duration. 
- """ - path = instance.get_rrd_path() - filename = os.path.join(path, 'net-%s.png' % duration) - - rrdtool.graph( - filename, - '--disable-rrdtool-tag', - '--imgformat', 'PNG', - '--width', '400', - '--height', '120', - '--start', 'now-%s' % duration, - '--vertical-label', 'bytes/s', - '--logarithmic', - '--units', 'si', - '--lower-limit', '1000', - '--rigid', - 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'), - 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'), - 'AREA:rx#00FF00:In traffic', - 'LINE1:tx#0000FF:Out traffic',) - - store_graph(instance.instance_id, filename) - - -def graph_disk(instance, duration): - """ - Creates a graph of disk usage for the specified duration. - """ - path = instance.get_rrd_path() - filename = os.path.join(path, 'disk-%s.png' % duration) - - rrdtool.graph( - filename, - '--disable-rrdtool-tag', - '--imgformat', 'PNG', - '--width', '400', - '--height', '120', - '--start', 'now-%s' % duration, - '--vertical-label', 'bytes/s', - '--logarithmic', - '--units', 'si', - '--lower-limit', '1000', - '--rigid', - 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'), - 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'), - 'AREA:rd#00FF00:Read', - 'LINE1:wr#0000FF:Write',) - - store_graph(instance.instance_id, filename) - - -def store_graph(instance_id, filename): - """ - Transmits the specified graph file to internal object store on cloud - controller. - """ - # TODO(devcamcar): Need to use an asynchronous method to make this - # connection. If boto has some separate method that generates - # the request it would like to make and another method to parse - # the response we can make our own client that does the actual - # request and hands it off to the response parser. - s3 = boto.s3.connection.S3Connection( - aws_access_key_id=FLAGS.aws_access_key_id, - aws_secret_access_key=FLAGS.aws_secret_access_key, - is_secure=False, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - port=FLAGS.s3_port, - host=FLAGS.s3_host) - bucket_name = '_%s.monitor' % instance_id - - # Object store isn't creating the bucket like it should currently - # when it is first requested, so have to catch and create manually. - try: - bucket = s3.get_bucket(bucket_name) - except Exception: - bucket = s3.create_bucket(bucket_name) - - key = boto.s3.Key(bucket) - key.key = os.path.basename(filename) - key.set_contents_from_filename(filename) - - -class Instance(object): - def __init__(self, conn, instance_id): - self.conn = conn - self.instance_id = instance_id - self.last_updated = datetime.datetime.min - self.cputime = 0 - self.cputime_last_updated = None - - init_rrd(self, 'cpu') - init_rrd(self, 'net') - init_rrd(self, 'disk') - - def needs_update(self): - """ - Indicates whether this instance is due to have its statistics updated. - """ - delta = utcnow() - self.last_updated - return delta.seconds >= FLAGS.monitoring_instances_step - - def update(self): - """ - Updates the instances statistics and stores the resulting graphs - in the internal object store on the cloud controller. 
- """ - LOG.debug(_('updating %s...'), self.instance_id) - - try: - data = self.fetch_cpu_stats() - if data is not None: - LOG.debug('CPU: %s', data) - update_rrd(self, 'cpu', data) - - data = self.fetch_net_stats() - LOG.debug('NET: %s', data) - update_rrd(self, 'net', data) - - data = self.fetch_disk_stats() - LOG.debug('DISK: %s', data) - update_rrd(self, 'disk', data) - - # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls - # and make the methods @defer.inlineCallbacks. - graph_cpu(self, '1d') - graph_cpu(self, '1w') - graph_cpu(self, '1m') - - graph_net(self, '1d') - graph_net(self, '1w') - graph_net(self, '1m') - - graph_disk(self, '1d') - graph_disk(self, '1w') - graph_disk(self, '1m') - except Exception: - LOG.exception(_('unexpected error during update')) - - self.last_updated = utcnow() - - def get_rrd_path(self): - """ - Returns the path to where RRD files are stored. - """ - return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id) - - def fetch_cpu_stats(self): - """ - Returns cpu usage statistics for this instance. - """ - info = self.conn.get_info(self.instance_id) - - # Get the previous values. - cputime_last = self.cputime - cputime_last_updated = self.cputime_last_updated - - # Get the raw CPU time used in nanoseconds. - self.cputime = float(info['cpu_time']) - self.cputime_last_updated = utcnow() - - LOG.debug('CPU: %d', self.cputime) - - # Skip calculation on first pass. Need delta to get a meaningful value. - if cputime_last_updated is None: - return None - - # Calculate the number of seconds between samples. - d = self.cputime_last_updated - cputime_last_updated - t = d.days * 86400 + d.seconds - - LOG.debug('t = %d', t) - - # Calculate change over time in number of nanoseconds of CPU time used. - cputime_delta = self.cputime - cputime_last - - LOG.debug('cputime_delta = %s', cputime_delta) - - # Get the number of virtual cpus in this domain. - vcpus = int(info['num_cpu']) - - LOG.debug('vcpus = %d', vcpus) - - # Calculate CPU % used and cap at 100. - return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) - - def fetch_disk_stats(self): - """ - Returns disk usage statistics for this instance. - """ - rd = 0 - wr = 0 - - disks = self.conn.get_disks(self.instance_id) - - # Aggregate the read and write totals. - for disk in disks: - try: - rd_req, rd_bytes, wr_req, wr_bytes, errs = \ - self.conn.block_stats(self.instance_id, disk) - rd += rd_bytes - wr += wr_bytes - except TypeError: - iid = self.instance_id - LOG.error(_('Cannot get blockstats for "%(disk)s"' - ' on "%(iid)s"') % locals()) - raise - - return '%d:%d' % (rd, wr) - - def fetch_net_stats(self): - """ - Returns network usage statistics for this instance. - """ - rx = 0 - tx = 0 - - interfaces = self.conn.get_interfaces(self.instance_id) - - # Aggregate the in and out totals. - for interface in interfaces: - try: - stats = self.conn.interface_stats(self.instance_id, interface) - rx += stats[0] - tx += stats[4] - except TypeError: - iid = self.instance_id - LOG.error(_('Cannot get ifstats for "%(interface)s"' - ' on "%(iid)s"') % locals()) - raise - - return '%d:%d' % (rx, tx) - - -class InstanceMonitor(object, service.Service): - """ - Monitors the running instances of the current machine. - """ - - def __init__(self): - """ - Initialize the monitoring loop. 
- """ - self._instances = {} - self._loop = task.LoopingCall(self.updateInstances) - - def startService(self): - self._instances = {} - self._loop.start(interval=FLAGS.monitoring_instances_delay) - service.Service.startService(self) - - def stopService(self): - self._loop.stop() - service.Service.stopService(self) - - def updateInstances(self): - """ - Update resource usage for all running instances. - """ - try: - conn = virt_connection.get_connection(read_only=True) - except Exception, exn: - LOG.exception(_('unexpected exception getting connection')) - time.sleep(FLAGS.monitoring_instances_delay) - return - - domain_ids = conn.list_instances() - try: - self.updateInstances_(conn, domain_ids) - except Exception, exn: - LOG.exception('updateInstances_') - - def updateInstances_(self, conn, domain_ids): - for domain_id in domain_ids: - if not domain_id in self._instances: - instance = Instance(conn, domain_id) - self._instances[domain_id] = instance - LOG.debug(_('Found instance: %s'), domain_id) - - for key in self._instances.keys(): - instance = self._instances[key] - if instance.needs_update(): - instance.update() diff --git a/nova/context.py b/nova/context.py index 5b2776d4e..b917a1d81 100644 --- a/nova/context.py +++ b/nova/context.py @@ -32,7 +32,7 @@ class RequestContext(object): def __init__(self, user_id, project_id, is_admin=None, read_deleted=False, roles=None, remote_address=None, timestamp=None, - request_id=None): + request_id=None, auth_token=None): self.user_id = user_id self.project_id = project_id self.roles = roles or [] @@ -49,6 +49,7 @@ class RequestContext(object): if not request_id: request_id = unicode(uuid.uuid4()) self.request_id = request_id + self.auth_token = auth_token def to_dict(self): return {'user_id': self.user_id, @@ -58,7 +59,8 @@ class RequestContext(object): 'roles': self.roles, 'remote_address': self.remote_address, 'timestamp': utils.strtime(self.timestamp), - 'request_id': self.request_id} + 'request_id': self.request_id, + 'auth_token': self.auth_token} @classmethod def from_dict(cls, values): @@ -74,7 +76,8 @@ class RequestContext(object): roles=self.roles, remote_address=self.remote_address, timestamp=self.timestamp, - request_id=self.request_id) + request_id=self.request_id, + auth_token=self.auth_token) def get_admin_context(read_deleted=False): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a13d60ec4..4f1445217 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1680,7 +1680,8 @@ def network_get_by_bridge(context, bridge): def network_get_by_cidr(context, cidr): session = get_session() result = session.query(models.Network).\ - filter_by(cidr=cidr).first() + filter(or_(models.Network.cidr == cidr, + models.Network.cidr_v6 == cidr)).first() if not result: raise exception.NetworkNotFoundForCidr(cidr=cidr) @@ -3041,13 +3042,18 @@ def instance_type_get_by_name(context, name): @require_context def instance_type_get_by_flavor_id(context, id): """Returns a dict describing specific flavor_id""" + try: + flavor_id = int(id) + except ValueError: + raise exception.FlavorNotFound(flavor_id=id) + session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ - filter_by(flavorid=int(id)).\ + filter_by(flavorid=flavor_id).\ first() if not inst_type: - raise exception.FlavorNotFound(flavor_id=id) + raise exception.FlavorNotFound(flavor_id=flavor_id) else: return _dict_with_extra_specs(inst_type) diff --git a/nova/exception.py b/nova/exception.py index 
8c9b45a80..68e6ac937 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -692,3 +692,11 @@ class PasteConfigNotFound(NotFound): class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") + + +class CannotResizeToSameSize(NovaException): + message = _("When resizing, instances must change size!") + + +class CannotResizeToSmallerSize(NovaException): + message = _("Resizing to a smaller size is not supported.") diff --git a/nova/image/glance.py b/nova/image/glance.py index 5c2dc957b..44a3c6f83 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -83,11 +83,16 @@ class GlanceImageService(service.BaseImageService): client = property(_get_client, _set_client) + def _set_client_context(self, context): + """Sets the client's auth token.""" + self.client.set_auth_token(context.auth_token) + def index(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user + self._set_client_context(context) filtered = [] filters = filters or {} if 'is_public' not in filters: @@ -104,6 +109,7 @@ class GlanceImageService(service.BaseImageService): def detail(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of detailed image information.""" + self._set_client_context(context) filtered = [] filters = filters or {} if 'is_public' not in filters: @@ -120,6 +126,7 @@ class GlanceImageService(service.BaseImageService): def show(self, context, image_id): """Returns a dict with image data for the given opaque image id.""" + self._set_client_context(context) try: image_meta = self.client.get_image_meta(image_id) except glance_exception.NotFound: @@ -143,6 +150,7 @@ class GlanceImageService(service.BaseImageService): def get(self, context, image_id, data): """Calls out to Glance for metadata and data and writes data.""" + self._set_client_context(context) try: image_meta, image_chunks = self.client.get_image(image_id) except glance_exception.NotFound: @@ -160,6 +168,7 @@ class GlanceImageService(service.BaseImageService): :raises: AlreadyExists if the image already exist. """ + self._set_client_context(context) # Translate Base -> Service LOG.debug(_('Creating image in Glance. Metadata passed in %s'), image_meta) @@ -182,6 +191,7 @@ class GlanceImageService(service.BaseImageService): :raises: ImageNotFound if the image does not exist. """ + self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) try: @@ -198,6 +208,7 @@ class GlanceImageService(service.BaseImageService): :raises: ImageNotFound if the image does not exist. 
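The context and Glance hunks above are two halves of one change: RequestContext now carries an auth_token, to_dict() preserves it across serialization, and GlanceImageService calls set_auth_token on its client before each request. A small sketch of that flow; FakeClient is a stand-in, and only its set_auth_token mirrors the call made in _set_client_context:

    from nova import context

    class FakeClient(object):
        """Stand-in for the Glance client, used only for this illustration."""

        def __init__(self):
            self.auth_tok = None

        def set_auth_token(self, auth_tok):
            self.auth_tok = auth_tok

    ctxt = context.RequestContext('fake-user', 'fake-project',
                                  auth_token='fake-token')
    print(ctxt.to_dict()['auth_token'])     # the token now travels with the context

    client = FakeClient()
    client.set_auth_token(ctxt.auth_token)  # what _set_client_context() does per call
    print(client.auth_tok)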
""" + self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) try: diff --git a/nova/network/manager.py b/nova/network/manager.py index 4a3791d8a..8fc6a295f 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -410,8 +410,11 @@ class NetworkManager(manager.SchedulerDependentManager): kwargs can contain fixed_ips to circumvent another db lookup """ instance_id = kwargs.pop('instance_id') - fixed_ips = kwargs.get('fixed_ips') or \ + try: + fixed_ips = kwargs.get('fixed_ips') or \ self.db.fixed_ip_get_by_instance(context, instance_id) + except exceptions.FixedIpNotFoundForInstance: + fixed_ips = [] LOG.debug(_("network deallocation for instance |%s|"), instance_id, context=context) # deallocate fixed ips @@ -551,15 +554,17 @@ class NetworkManager(manager.SchedulerDependentManager): # with a network, or a cluster of computes with a network # and use that network here with a method like # network_get_by_compute_host - address = self.db.fixed_ip_associate_pool(context.elevated(), - network['id'], - instance_id) - vif = self.db.virtual_interface_get_by_instance_and_network(context, - instance_id, - network['id']) - values = {'allocated': True, - 'virtual_interface_id': vif['id']} - self.db.fixed_ip_update(context, address, values) + address = None + if network['cidr']: + address = self.db.fixed_ip_associate_pool(context.elevated(), + network['id'], + instance_id) + get_vif = self.db.virtual_interface_get_by_instance_and_network + vif = get_vif(context, instance_id, network['id']) + values = {'allocated': True, + 'virtual_interface_id': vif['id']} + self.db.fixed_ip_update(context, address, values) + self._setup_network(context, network) return address @@ -613,34 +618,39 @@ class NetworkManager(manager.SchedulerDependentManager): network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): """Create networks based on parameters.""" - fixed_net = netaddr.IPNetwork(cidr) - if FLAGS.use_ipv6: + if cidr_v6: fixed_net_v6 = netaddr.IPNetwork(cidr_v6) significant_bits_v6 = 64 network_size_v6 = 1 << 64 - for index in range(num_networks): - start = index * network_size + if cidr: + fixed_net = netaddr.IPNetwork(cidr) significant_bits = 32 - int(math.log(network_size, 2)) - cidr = '%s/%s' % (fixed_net[start], significant_bits) - project_net = netaddr.IPNetwork(cidr) + + for index in range(num_networks): net = {} net['bridge'] = bridge net['bridge_interface'] = bridge_interface net['dns1'] = dns1 net['dns2'] = dns2 - net['cidr'] = cidr - net['multi_host'] = multi_host - net['netmask'] = str(project_net.netmask) - net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast) - net['dhcp_start'] = str(project_net[2]) + + if cidr: + start = index * network_size + project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start], + significant_bits)) + net['cidr'] = str(project_net) + net['multi_host'] = multi_host + net['netmask'] = str(project_net.netmask) + net['gateway'] = str(project_net[1]) + net['broadcast'] = str(project_net.broadcast) + net['dhcp_start'] = str(project_net[2]) + if num_networks > 1: net['label'] = '%s_%d' % (label, index) else: net['label'] = label - if FLAGS.use_ipv6: + if cidr_v6: start_v6 = index * network_size_v6 cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) @@ -673,11 +683,11 @@ class NetworkManager(manager.SchedulerDependentManager): # None if network with cidr or cidr_v6 already exists network = self.db.network_create_safe(context, net) 
- if network: + if not network: + raise ValueError(_('Network already exists!')) + + if network and cidr: self._create_fixed_ips(context, network['id']) - else: - raise ValueError(_('Network with cidr %s already exists') % - cidr) @property def _bottom_reserved_ips(self): # pylint: disable=R0201 diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 98969fd3e..e18f3e280 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -80,6 +80,10 @@ def notify(publisher_id, event_type, priority, payload): if priority not in log_levels: raise BadPriorityException( _('%s not in valid priorities' % priority)) + + # Ensure everything is JSON serializable. + payload = utils.to_primitive(payload, convert_instances=True) + driver = utils.import_object(FLAGS.notification_driver) msg = dict(message_id=str(uuid.uuid4()), publisher_id=publisher_id, diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 50ad7de08..ab7ae2e54 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -106,6 +106,11 @@ class FloatingIpTest(test.TestCase): self.assertEqual(view['floating_ip']['fixed_ip'], None) self.assertEqual(view['floating_ip']['instance_id'], None) + def test_translate_floating_ip_view_dict(self): + floating_ip = {'id': 0, 'address': '10.0.0.10', 'fixed_ip': None} + view = _translate_floating_ip_view(floating_ip) + self.assertTrue('floating_ip' in view) + def test_floating_ips_list(self): req = webob.Request.blank('/v1.1/os-floating-ips') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 28969d5f8..a67a28a4e 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -113,8 +113,7 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def stub_out_image_service(stubs): def fake_get_image_service(image_href): - image_id = int(str(image_href).split('/')[-1]) - return (nova.image.fake.FakeImageService(), image_id) + return (nova.image.fake.FakeImageService(), image_href) stubs.Set(nova.image, 'get_image_service', fake_get_image_service) stubs.Set(nova.image, 'get_default_image_service', lambda: nova.image.fake.FakeImageService()) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 8c5ad7f8d..942c0b333 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -1042,82 +1042,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(400, response.status_int) - def test_create_backup_no_name(self): - """Name is also required for backups""" - body = dict(image=dict(serverId='123', image_type='backup', - backup_type='daily', rotation=1)) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_backup_with_rotation_and_backup_type(self): - """The happy path for creating backups - - Creating a backup is an admin-only operation, as opposed to snapshots - which are available to anybody. - """ - # FIXME(sirp): teardown needed? 
- FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', image_type='backup', - name='Backup 1', - backup_type='daily', rotation=1)) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - - def test_create_backup_no_rotation(self): - """Rotation is required for backup requests""" - # FIXME(sirp): teardown needed? - FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', name='daily', - image_type='backup', backup_type='daily')) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_backup_no_backup_type(self): - """Backup Type (daily or weekly) is required for backup requests""" - # FIXME(sirp): teardown needed? - FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', name='daily', - image_type='backup', rotation=1)) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_with_invalid_image_type(self): - """Valid image_types are snapshot | daily | weekly""" - # FIXME(sirp): teardown needed? - FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', image_type='monthly', - rotation=1)) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - def test_create_image_no_server_id(self): body = dict(image=dict(name='Snapshot 1')) @@ -1128,113 +1052,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(400, response.status_int) - def test_create_image_v1_1(self): - - body = dict(image=dict(serverRef='123', name='Snapshot 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - - def test_create_image_v1_1_actual_server_ref(self): - - serverRef = 'http://localhost/v1.1/servers/1' - serverBookmark = 'http://localhost/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - result = json.loads(response.body) - expected = { - 'id': 1, - 'links': [ - { - 'rel': 'self', - 'href': serverRef, - }, - { - 'rel': 'bookmark', - 'href': serverBookmark, - }, - ] - } - self.assertEqual(result['image']['server'], expected) - - def test_create_image_v1_1_actual_server_ref_port(self): - 
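The v1.0 image-API backup tests removed here are superseded by server-action tests added later in this diff: backups stop being a POST /v1.0/images variant and become a createBackup action on the server. For comparison, the two request shapes (server id and values are the ones the tests use):

    import json

    old_style = {'image': {'serverId': '123',
                           'name': 'Backup 1',
                           'image_type': 'backup',
                           'backup_type': 'daily',
                           'rotation': 1}}          # POST /v1.0/images

    new_style = {'createBackup': {'name': 'Backup 1',
                                  'backup_type': 'daily',
                                  'rotation': 1}}   # POST /v1.0/servers/123/action

    print(json.dumps(old_style))
    print(json.dumps(new_style))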
- serverRef = 'http://localhost:8774/v1.1/servers/1' - serverBookmark = 'http://localhost:8774/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - result = json.loads(response.body) - expected = { - 'id': 1, - 'links': [ - { - 'rel': 'self', - 'href': serverRef, - }, - { - 'rel': 'bookmark', - 'href': serverBookmark, - }, - ] - } - self.assertEqual(result['image']['server'], expected) - - def test_create_image_v1_1_server_ref_bad_hostname(self): - - serverRef = 'http://asdf/v1.1/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_v1_1_no_server_ref(self): - - body = dict(image=dict(name='Snapshot 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_v1_1_server_ref_missing_version(self): - - serverRef = 'http://localhost/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_v1_1_server_ref_missing_id(self): - - serverRef = 'http://localhost/v1.1/servers' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - @classmethod def _make_image_fixtures(cls): image_id = 123 @@ -1713,76 +1530,3 @@ class ImageXMLSerializationTest(test.TestCase): """.replace(" ", "") % (locals())) self.assertEqual(expected.toxml(), actual.toxml()) - - def test_create(self): - serializer = images.ImageXMLSerializer() - - fixture = { - 'image': { - 'id': 1, - 'name': 'Image1', - 'created': self.TIMESTAMP, - 'updated': self.TIMESTAMP, - 'status': 'SAVING', - 'progress': 80, - 'server': { - 'id': 1, - 'links': [ - { - 'href': self.SERVER_HREF, - 'rel': 'self', - }, - { - 'href': self.SERVER_BOOKMARK, - 'rel': 'bookmark', - }, - ], - }, - 'metadata': { - 'key1': 'value1', - }, - 'links': [ - { - 'href': self.IMAGE_HREF % 1, - 'rel': 'self', - }, - { - 'href': self.IMAGE_BOOKMARK % 1, - 'rel': 'bookmark', - }, - ], - }, - } - - output = serializer.serialize(fixture, 'create') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <image id="1" - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - name="Image1" - updated="%(expected_now)s" - 
created="%(expected_now)s" - status="SAVING" - progress="80"> - <server id="1"> - <atom:link rel="self" href="%(expected_server_href)s"/> - <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> - </server> - <metadata> - <meta key="key1"> - value1 - </meta> - </metadata> - <atom:link href="%(expected_href)s" rel="self"/> - <atom:link href="%(expected_bookmark)s" rel="bookmark"/> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index d824d6dcd..ed03cff53 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -260,6 +260,17 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.compute.API, "get_diagnostics", fake_compute_api) self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api) + fakes.stub_out_glance(self.stubs) + fakes.stub_out_compute_api_snapshot(self.stubs) + service_class = 'nova.image.glance.GlanceImageService' + self.service = utils.import_object(service_class) + self.context = context.RequestContext(1, None) + self.service.delete_all() + self.sent_to_glance = {} + fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance) + + self.allow_admin = FLAGS.allow_admin_api + self.webreq = common.webob_factory('/v1.0/servers') def test_get_server_by_id(self): @@ -1314,7 +1325,8 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1(self): self._setup_for_create_instance() - image_href = 'http://localhost/images/2' + # proper local hrefs must start with 'http://localhost/v1.1/' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/flavors/3' expected_flavor = { "id": "3", @@ -2315,8 +2327,9 @@ class ServersTest(test.TestCase): """This is basically the same as resize, only we provide the `migrate` attribute in the body's dict. 
""" - req = self.webreq('/1/action', 'POST', dict(migrate=None)) + req = self.webreq('/1/migrate', 'POST') + FLAGS.allow_admin_api = True self.resize_called = False def resize_mock(*args): @@ -2328,6 +2341,14 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) self.assertEqual(self.resize_called, True) + def test_migrate_server_no_admin_api_fails(self): + req = self.webreq('/1/migrate', 'POST') + + FLAGS.allow_admin_api = False + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) + def test_shutdown_status(self): new_server = return_server_with_power_state(power_state.SHUTDOWN) self.stubs.Set(nova.db.api, 'instance_get', new_server) @@ -2346,6 +2367,268 @@ class ServersTest(test.TestCase): res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['status'], 'SHUTOFF') + def test_create_image_v1_1(self): + body = { + 'createImage': { + 'name': 'Snapshot 1', + }, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + location = response.headers['Location'] + self.assertEqual('http://localhost/v1.1/images/123', location) + + def test_create_image_v1_1_with_metadata(self): + body = { + 'createImage': { + 'name': 'Snapshot 1', + 'metadata': {'key': 'asdf'}, + }, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + location = response.headers['Location'] + self.assertEqual('http://localhost/v1.1/images/123', location) + + def test_create_image_v1_1_no_name(self): + body = { + 'createImage': {}, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_image_v1_1_bad_metadata(self): + body = { + 'createImage': { + 'name': 'geoff', + 'metadata': 'henry', + }, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup(self): + """The happy path for creating backups""" + FLAGS.allow_admin_api = True + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + self.assertTrue(response.headers['Location']) + + def test_create_backup_v1_1(self): + """The happy path for creating backups through v1.1 api""" + FLAGS.allow_admin_api = True + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + 
self.assertTrue(response.headers['Location']) + + def test_create_backup_admin_api_off(self): + """The happy path for creating backups""" + FLAGS.allow_admin_api = False + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(501, response.status_int) + + def test_create_backup_with_metadata(self): + FLAGS.allow_admin_api = True + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + 'metadata': {'123': 'asdf'}, + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + self.assertTrue(response.headers['Location']) + + def test_create_backup_no_name(self): + """Name is required for backups""" + FLAGS.allow_admin_api = True + + body = { + 'createBackup': { + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_rotation(self): + """Rotation is required for backup requests""" + FLAGS.allow_admin_api = True + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + }, + } + + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_backup_type(self): + """Backup Type (daily or weekly) is required for backup requests""" + FLAGS.allow_admin_api = True + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'rotation': 1, + }, + } + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_bad_entity(self): + FLAGS.allow_admin_api = True + + body = {'createBackup': 'go'} + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + +class TestServerActionXMLDeserializer(test.TestCase): + + def setUp(self): + self.deserializer = create_instance_helper.ServerXMLDeserializer() + + def tearDown(self): + pass + + def test_create_image(self): + serial_request = """ +<createImage xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test"/>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "createImage": { + "name": "new-server-test", + "metadata": {}, + }, + } + self.assertEquals(request['body'], expected) + + def test_create_image_with_metadata(self): + serial_request = """ +<createImage xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test"> + <metadata> + <meta key="key1">value1</meta> + </metadata> +</createImage>""" + request 
= self.deserializer.deserialize(serial_request, 'action') + expected = { + "createImage": { + "name": "new-server-test", + "metadata": {"key1": "value1"}, + }, + } + self.assertEquals(request['body'], expected) + + def test_create_backup_with_metadata(self): + serial_request = """ +<createBackup xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test" + rotation="12" + backup_type="daily"> + <metadata> + <meta key="key1">value1</meta> + </metadata> +</createBackup>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "createBackup": { + "name": "new-server-test", + "rotation": "12", + "backup_type": "daily", + "metadata": {"key1": "value1"}, + }, + } + self.assertEquals(request['body'], expected) + class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase): @@ -2816,7 +3099,7 @@ class TestServerCreateRequestXMLDeserializerV11(test.TestCase): self.assertEquals(request['body'], expected) -class TextAddressesXMLSerialization(test.TestCase): +class TestAddressesXMLSerialization(test.TestCase): serializer = nova.api.openstack.ips.IPXMLSerializer() diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index da964ee1f..e68455778 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -16,21 +16,92 @@ # under the License. import json +import stubout import webob +import xml.etree.ElementTree + from nova import context from nova import test from nova.tests.api.openstack import fakes from nova.api.openstack import versions from nova.api.openstack import views +from nova.api.openstack import wsgi + +VERSIONS = { + "v1.0": { + "id": "v1.0", + "status": "DEPRECATED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/application.wadl" + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json" + } + ], + }, + "v1.1": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl" + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json" + } + ], + }, +} class VersionsTest(test.TestCase): def setUp(self): super(VersionsTest, self).setUp() self.context = context.get_admin_context() + self.stubs = stubout.StubOutForTesting() + fakes.stub_out_auth(self.stubs) + #Stub out VERSIONS + self.old_versions = versions.VERSIONS + versions.VERSIONS = VERSIONS def tearDown(self): + versions.VERSIONS = self.old_versions super(VersionsTest, self).tearDown() def test_get_version_list(self): @@ -44,7 +115,7 @@ class VersionsTest(test.TestCase): { "id": "v1.1", "status": "CURRENT", - "updated": "2011-07-18T11:30:00Z", + "updated": "2011-01-21T11:33:21Z", "links": 
[ { "rel": "self", @@ -54,7 +125,7 @@ class VersionsTest(test.TestCase): { "id": "v1.0", "status": "DEPRECATED", - "updated": "2010-10-09T11:30:00Z", + "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", @@ -64,6 +135,183 @@ class VersionsTest(test.TestCase): ] self.assertEqual(versions, expected) + def test_get_version_1_0_detail(self): + req = webob.Request.blank('/v1.0/') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/json") + version = json.loads(res.body) + expected = { + "version": { + "id": "v1.0", + "status": "DEPRECATED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.0/" + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/application.wadl" + } + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/" + "vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/" + "vnd.openstack.compute-v1.0+json" + } + ] + } + } + self.assertEqual(expected, version) + + def test_get_version_1_1_detail(self): + req = webob.Request.blank('/v1.1/') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/json") + version = json.loads(res.body) + expected = { + "version": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/" + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl" + } + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/" + "vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/" + "vnd.openstack.compute-v1.1+json" + } + ] + } + } + self.assertEqual(expected, version) + + def test_get_version_1_0_detail_xml(self): + req = webob.Request.blank('/v1.0/') + req.accept = "application/xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/xml") + root = xml.etree.ElementTree.XML(res.body) + self.assertEqual(root.tag.split('}')[1], "version") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) + + children = list(root) + media_types = children[0] + media_type_nodes = list(media_types) + links = (children[1], children[2], children[3]) + + self.assertEqual(media_types.tag.split('}')[1], 'media-types') + for media_node in media_type_nodes: + self.assertEqual(media_node.tag.split('}')[1], 'media-type') + + expected = """ + <version id="v1.0" status="DEPRECATED" + updated="2011-01-21T11:33:21Z" + xmlns="%s" + xmlns:atom="http://www.w3.org/2005/Atom"> + + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.0+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.0+json"/> + </media-types> + + <atom:link 
href="http://localhost/v1.0/" + rel="self"/> + + <atom:link href="http://docs.rackspacecloud.com/servers/ + api/v1.0/cs-devguide-20110125.pdf" + rel="describedby" + type="application/pdf"/> + + <atom:link href="http://docs.rackspacecloud.com/servers/ + api/v1.0/application.wadl" + rel="describedby" + type="application/vnd.sun.wadl+xml"/> + </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 + + actual = res.body.replace(" ", "").replace("\n", "") + self.assertEqual(expected, actual) + + def test_get_version_1_1_detail_xml(self): + req = webob.Request.blank('/v1.1/') + req.accept = "application/xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/xml") + expected = """ + <version id="v1.1" status="CURRENT" + updated="2011-01-21T11:33:21Z" + xmlns="%s" + xmlns:atom="http://www.w3.org/2005/Atom"> + + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.1+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.1+json"/> + </media-types> + + <atom:link href="http://localhost/v1.1/" + rel="self"/> + + <atom:link href="http://docs.rackspacecloud.com/servers/ + api/v1.1/cs-devguide-20110125.pdf" + rel="describedby" + type="application/pdf"/> + + <atom:link href="http://docs.rackspacecloud.com/servers/ + api/v1.1/application.wadl" + rel="describedby" + type="application/vnd.sun.wadl+xml"/> + </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 + + actual = res.body.replace(" ", "").replace("\n", "") + self.assertEqual(expected, actual) + def test_get_version_list_xml(self): req = webob.Request.blank('/') req.accept = "application/xml" @@ -71,18 +319,94 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - expected = """<versions> - <version id="v1.1" status="CURRENT" updated="2011-07-18T11:30:00Z"> + expected = """ + <versions xmlns="%s" xmlns:atom="%s"> + <version id="v1.1" status="CURRENT" updated="2011-01-21T11:33:21Z"> <atom:link href="http://localhost/v1.1/" rel="self"/> </version> <version id="v1.0" status="DEPRECATED" - updated="2010-10-09T11:30:00Z"> + updated="2011-01-21T11:33:21Z"> <atom:link href="http://localhost/v1.0/" rel="self"/> </version> - </versions>""".replace(" ", "").replace("\n", "") + </versions>""".replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, + wsgi.XMLNS_ATOM) + + actual = res.body.replace(" ", "").replace("\n", "") + + self.assertEqual(expected, actual) + + def test_get_version_1_0_detail_atom(self): + req = webob.Request.blank('/v1.0/') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual("application/atom+xml", res.content_type) + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text">About This Version</title> + <updated>2011-01-21T11:33:21Z</updated> + <id>http://localhost/v1.0/</id> + <author> + <name>Rackspace</name> + <uri>http://www.rackspace.com/</uri> + </author> + <link href="http://localhost/v1.0/" rel="self"/> + <entry> + <id>http://localhost/v1.0/</id> + <title type="text">Version v1.0</title> + <updated>2011-01-21T11:33:21Z</updated> + <link href="http://localhost/v1.0/" + rel="self"/> + <link href="http://docs.rackspacecloud.com/servers/ + api/v1.0/cs-devguide-20110125.pdf" + rel="describedby" type="application/pdf"/> + <link href="http://docs.rackspacecloud.com/servers/ + 
api/v1.0/application.wadl" + rel="describedby" type="application/vnd.sun.wadl+xml"/> + <content type="text"> + Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) + </content> + </entry> + </feed>""".replace(" ", "").replace("\n", "") actual = res.body.replace(" ", "").replace("\n", "") + self.assertEqual(expected, actual) + + def test_get_version_1_1_detail_atom(self): + req = webob.Request.blank('/v1.1/') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual("application/atom+xml", res.content_type) + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text">About This Version</title> + <updated>2011-01-21T11:33:21Z</updated> + <id>http://localhost/v1.1/</id> + <author> + <name>Rackspace</name> + <uri>http://www.rackspace.com/</uri> + </author> + <link href="http://localhost/v1.1/" rel="self"/> + <entry> + <id>http://localhost/v1.1/</id> + <title type="text">Version v1.1</title> + <updated>2011-01-21T11:33:21Z</updated> + <link href="http://localhost/v1.1/" + rel="self"/> + <link href="http://docs.rackspacecloud.com/servers/ + api/v1.1/cs-devguide-20110125.pdf" + rel="describedby" type="application/pdf"/> + <link href="http://docs.rackspacecloud.com/servers/ + api/v1.1/application.wadl" + rel="describedby" type="application/vnd.sun.wadl+xml"/> + <content type="text"> + Version v1.1 CURRENT (2011-01-21T11:33:21Z) + </content> + </entry> + </feed>""".replace(" ", "").replace("\n", "") + actual = res.body.replace(" ", "").replace("\n", "") self.assertEqual(expected, actual) def test_get_version_list_atom(self): @@ -95,7 +419,7 @@ class VersionsTest(test.TestCase): expected = """ <feed xmlns="http://www.w3.org/2005/Atom"> <title type="text">Available API Versions</title> - <updated>2011-07-18T11:30:00Z</updated> + <updated>2011-01-21T11:33:21Z</updated> <id>http://localhost/</id> <author> <name>Rackspace</name> @@ -105,19 +429,19 @@ class VersionsTest(test.TestCase): <entry> <id>http://localhost/v1.1/</id> <title type="text">Version v1.1</title> - <updated>2011-07-18T11:30:00Z</updated> + <updated>2011-01-21T11:33:21Z</updated> <link href="http://localhost/v1.1/" rel="self"/> <content type="text"> - Version v1.1 CURRENT (2011-07-18T11:30:00Z) + Version v1.1 CURRENT (2011-01-21T11:33:21Z) </content> </entry> <entry> <id>http://localhost/v1.0/</id> <title type="text">Version v1.0</title> - <updated>2010-10-09T11:30:00Z</updated> + <updated>2011-01-21T11:33:21Z</updated> <link href="http://localhost/v1.0/" rel="self"/> <content type="text"> - Version v1.0 DEPRECATED (2010-10-09T11:30:00Z) + Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) </content> </entry> </feed> @@ -127,28 +451,184 @@ class VersionsTest(test.TestCase): self.assertEqual(expected, actual) + def test_multi_choice_image(self): + req = webob.Request.blank('/images/1') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/json") + + expected = { + "choices": [ + { + "id": "v1.1", + "status": "CURRENT", + "links": [ + { + "href": "http://localhost/v1.1/images/1", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json" + }, + ], + }, + { + "id": "v1.0", + "status": "DEPRECATED", + "links": [ + { + "href": "http://localhost/v1.0/images/1", + "rel": 
"self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json" + }, + ], + }, + ], } + + self.assertDictMatch(expected, json.loads(res.body)) + + def test_multi_choice_image_xml(self): + req = webob.Request.blank('/images/1') + req.accept = "application/xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/xml") + + expected = """ + <choices xmlns="%s" xmlns:atom="%s"> + <version id="v1.1" status="CURRENT"> + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.1+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.1+json"/> + </media-types> + <atom:link href="http://localhost/v1.1/images/1" rel="self"/> + </version> + <version id="v1.0" status="DEPRECATED"> + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.0+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.0+json"/> + </media-types> + <atom:link href="http://localhost/v1.0/images/1" rel="self"/> + </version> + </choices>""".replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, + wsgi.XMLNS_ATOM) + + def test_multi_choice_server_atom(self): + """ + Make sure multi choice responses do not have content-type + application/atom+xml (should use default of json) + """ + req = webob.Request.blank('/servers/2') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/json") + + def test_multi_choice_server(self): + req = webob.Request.blank('/servers/2') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/json") + + expected = { + "choices": [ + { + "id": "v1.1", + "status": "CURRENT", + "links": [ + { + "href": "http://localhost/v1.1/servers/2", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json" + }, + ], + }, + { + "id": "v1.0", + "status": "DEPRECATED", + "links": [ + { + "href": "http://localhost/v1.0/servers/2", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json" + }, + ], + }, + ], } + + self.assertDictMatch(expected, json.loads(res.body)) + + +class VersionsViewBuilderTests(test.TestCase): def test_view_builder(self): base_url = "http://example.org/" version_data = { - "id": "3.2.1", - "status": "CURRENT", - "updated": "2011-07-18T11:30:00Z"} + "v3.2.1": { + "id": "3.2.1", + "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", + } + } expected = { - "id": "3.2.1", - "status": "CURRENT", - "updated": "2011-07-18T11:30:00Z", - "links": [ + "versions": [ { - "rel": "self", - "href": "http://example.org/3.2.1/", - }, - ], + "id": "3.2.1", + "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", + "links": [ + { + "rel": "self", + "href": "http://example.org/3.2.1/", + }, + ], + } + ] } builder = views.versions.ViewBuilder(base_url) - output = 
builder.build(version_data) + output = builder.build_versions(version_data) self.assertEqual(output, expected) @@ -163,7 +643,9 @@ class VersionsTest(test.TestCase): self.assertEqual(actual, expected) - def test_xml_serializer(self): + +class VersionsSerializerTests(test.TestCase): + def test_versions_list_xml_serializer(self): versions_data = { 'versions': [ { @@ -180,20 +662,137 @@ class VersionsTest(test.TestCase): ] } - expected = """ - <versions> - <version id="2.7.1" status="DEPRECATED" - updated="2011-07-18T11:30:00Z"> - <atom:link href="http://test/2.7.1" rel="self"/> - </version> - </versions>""".replace(" ", "").replace("\n", "") + serializer = versions.VersionsXMLSerializer() + response = serializer.index(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "versions") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) + version = list(root)[0] + self.assertEqual(version.tag.split('}')[1], "version") + self.assertEqual(version.get('id'), + versions_data['versions'][0]['id']) + self.assertEqual(version.get('status'), + versions_data['versions'][0]['status']) + + link = list(version)[0] + + self.assertEqual(link.tag.split('}')[1], "link") + self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) + for key, val in versions_data['versions'][0]['links'][0].items(): + self.assertEqual(link.get(key), val) + + def test_versions_multi_xml_serializer(self): + versions_data = { + 'choices': [ + { + "id": "2.7.1", + "updated": "2011-07-18T11:30:00Z", + "status": "DEPRECATED", + "media-types": VERSIONS['v1.1']['media-types'], + "links": [ + { + "rel": "self", + "href": "http://test/2.7.1/images", + }, + ], + }, + ] + } serializer = versions.VersionsXMLSerializer() - response = serializer.default(versions_data) - response = response.replace(" ", "").replace("\n", "") - self.assertEqual(expected, response) + response = serializer.multi(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "choices") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) + version = list(root)[0] + self.assertEqual(version.tag.split('}')[1], "version") + self.assertEqual(version.get('id'), versions_data['choices'][0]['id']) + self.assertEqual(version.get('status'), + versions_data['choices'][0]['status']) + + media_types = list(version)[0] + media_type_nodes = list(media_types) + self.assertEqual(media_types.tag.split('}')[1], "media-types") + + set_types = versions_data['choices'][0]['media-types'] + for i, type in enumerate(set_types): + node = media_type_nodes[i] + self.assertEqual(node.tag.split('}')[1], "media-type") + for key, val in set_types[i].items(): + self.assertEqual(node.get(key), val) + + link = list(version)[1] + + self.assertEqual(link.tag.split('}')[1], "link") + self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) + for key, val in versions_data['choices'][0]['links'][0].items(): + self.assertEqual(link.get(key), val) + + def test_version_detail_xml_serializer(self): + version_data = { + "version": { + "id": "v1.0", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.0/" + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + 
"servers/api/v1.0/application.wadl" + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json" + } + ], + }, + } + + serializer = versions.VersionsXMLSerializer() + response = serializer.show(version_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "version") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - def test_atom_serializer(self): + children = list(root) + media_types = children[0] + media_type_nodes = list(media_types) + links = (children[1], children[2], children[3]) + + self.assertEqual(media_types.tag.split('}')[1], 'media-types') + for i, media_node in enumerate(media_type_nodes): + self.assertEqual(media_node.tag.split('}')[1], 'media-type') + for key, val in version_data['version']['media-types'][i].items(): + self.assertEqual(val, media_node.get(key)) + + for i, link in enumerate(links): + self.assertEqual(link.tag.split('}')[0].strip('{'), + 'http://www.w3.org/2005/Atom') + self.assertEqual(link.tag.split('}')[1], 'link') + for key, val in version_data['version']['links'][i].items(): + self.assertEqual(val, link.get(key)) + + def test_versions_list_atom_serializer(self): versions_data = { 'versions': [ { @@ -210,45 +809,158 @@ class VersionsTest(test.TestCase): ] } - expected = """ - <feed xmlns="http://www.w3.org/2005/Atom"> - <title type="text"> - Available API Versions - </title> - <updated> - 2011-07-20T11:40:00Z - </updated> - <id> - http://test/ - </id> - <author> - <name> - Rackspace - </name> - <uri> - http://www.rackspace.com/ - </uri> - </author> - <link href="http://test/" rel="self"/> - <entry> - <id> - http://test/2.9.8 - </id> - <title type="text"> - Version 2.9.8 - </title> - <updated> - 2011-07-20T11:40:00Z - </updated> - <link href="http://test/2.9.8" rel="self"/> - <content type="text"> - Version 2.9.8 CURRENT (2011-07-20T11:40:00Z) - </content> - </entry> - </feed>""".replace(" ", "").replace("\n", "") + serializer = versions.VersionsAtomSerializer() + response = serializer.index(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "feed") + self.assertEqual(root.tag.split('}')[0].strip('{'), + "http://www.w3.org/2005/Atom") + + children = list(root) + title = children[0] + updated = children[1] + id = children[2] + author = children[3] + link = children[4] + entry = children[5] + + self.assertEqual(title.tag.split('}')[1], 'title') + self.assertEqual(title.text, 'Available API Versions') + self.assertEqual(updated.tag.split('}')[1], 'updated') + self.assertEqual(updated.text, '2011-07-20T11:40:00Z') + self.assertEqual(id.tag.split('}')[1], 'id') + self.assertEqual(id.text, 'http://test/') + + self.assertEqual(author.tag.split('}')[1], 'author') + author_name = list(author)[0] + author_uri = list(author)[1] + self.assertEqual(author_name.tag.split('}')[1], 'name') + self.assertEqual(author_name.text, 'Rackspace') + self.assertEqual(author_uri.tag.split('}')[1], 'uri') + self.assertEqual(author_uri.text, 'http://www.rackspace.com/') + + self.assertEqual(link.get('href'), 'http://test/') + self.assertEqual(link.get('rel'), 'self') + + self.assertEqual(entry.tag.split('}')[1], 'entry') + entry_children = list(entry) + entry_id = entry_children[0] + entry_title = entry_children[1] + entry_updated = entry_children[2] + entry_link = entry_children[3] + entry_content = entry_children[4] + 
self.assertEqual(entry_id.tag.split('}')[1], "id") + self.assertEqual(entry_id.text, "http://test/2.9.8") + self.assertEqual(entry_title.tag.split('}')[1], "title") + self.assertEqual(entry_title.get('type'), "text") + self.assertEqual(entry_title.text, "Version 2.9.8") + self.assertEqual(entry_updated.tag.split('}')[1], "updated") + self.assertEqual(entry_updated.text, "2011-07-20T11:40:00Z") + self.assertEqual(entry_link.tag.split('}')[1], "link") + self.assertEqual(entry_link.get('href'), "http://test/2.9.8") + self.assertEqual(entry_link.get('rel'), "self") + self.assertEqual(entry_content.tag.split('}')[1], "content") + self.assertEqual(entry_content.get('type'), "text") + self.assertEqual(entry_content.text, + "Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)") + + def test_version_detail_atom_serializer(self): + versions_data = { + "version": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/" + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf" + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl" + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json" + } + ], + }, + } serializer = versions.VersionsAtomSerializer() - response = serializer.default(versions_data) - print response - response = response.replace(" ", "").replace("\n", "") - self.assertEqual(expected, response) + response = serializer.show(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "feed") + self.assertEqual(root.tag.split('}')[0].strip('{'), + "http://www.w3.org/2005/Atom") + + children = list(root) + title = children[0] + updated = children[1] + id = children[2] + author = children[3] + link = children[4] + entry = children[5] + + self.assertEqual(root.tag.split('}')[1], 'feed') + self.assertEqual(title.tag.split('}')[1], 'title') + self.assertEqual(title.text, 'About This Version') + self.assertEqual(updated.tag.split('}')[1], 'updated') + self.assertEqual(updated.text, '2011-01-21T11:33:21Z') + self.assertEqual(id.tag.split('}')[1], 'id') + self.assertEqual(id.text, 'http://localhost/v1.1/') + + self.assertEqual(author.tag.split('}')[1], 'author') + author_name = list(author)[0] + author_uri = list(author)[1] + self.assertEqual(author_name.tag.split('}')[1], 'name') + self.assertEqual(author_name.text, 'Rackspace') + self.assertEqual(author_uri.tag.split('}')[1], 'uri') + self.assertEqual(author_uri.text, 'http://www.rackspace.com/') + + self.assertEqual(link.get('href'), + 'http://localhost/v1.1/') + self.assertEqual(link.get('rel'), 'self') + + self.assertEqual(entry.tag.split('}')[1], 'entry') + entry_children = list(entry) + entry_id = entry_children[0] + entry_title = entry_children[1] + entry_updated = entry_children[2] + entry_links = (entry_children[3], entry_children[4], entry_children[5]) + entry_content = entry_children[6] + + self.assertEqual(entry_id.tag.split('}')[1], "id") + self.assertEqual(entry_id.text, + "http://localhost/v1.1/") + self.assertEqual(entry_title.tag.split('}')[1], "title") + self.assertEqual(entry_title.get('type'), "text") + self.assertEqual(entry_title.text, "Version v1.1") + 
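For reference, the view-builder contract that feeds these serializers is pinned down a little earlier in this hunk (VersionsViewBuilderTests.test_view_builder): build_versions takes a dict keyed by version id and returns the versions document with self links rooted at the base URL. A sketch mirroring that test:

    from nova.api.openstack import views

    version_data = {
        'v3.2.1': {'id': '3.2.1',
                   'status': 'CURRENT',
                   'updated': '2011-07-18T11:30:00Z'},
    }

    builder = views.versions.ViewBuilder('http://example.org/')
    print(builder.build_versions(version_data))
    # roughly: {'versions': [{'id': '3.2.1', 'status': 'CURRENT', ...,
    #           'links': [{'rel': 'self', 'href': 'http://example.org/3.2.1/'}]}]}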
self.assertEqual(entry_updated.tag.split('}')[1], "updated") + self.assertEqual(entry_updated.text, "2011-01-21T11:33:21Z") + + for i, link in enumerate(versions_data["version"]["links"]): + self.assertEqual(entry_links[i].tag.split('}')[1], "link") + for key, val in versions_data["version"]["links"][i].items(): + self.assertEqual(entry_links[i].get(key), val) + + self.assertEqual(entry_content.tag.split('}')[1], "content") + self.assertEqual(entry_content.get('type'), "text") + self.assertEqual(entry_content.text, + "Version v1.1 CURRENT (2011-01-21T11:33:21Z)") diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index aac3ff330..d51b19ccd 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -60,7 +60,10 @@ class FakeGlance(object): 'container_format': 'ovf'}, 'image_data': StringIO.StringIO('')}} - def __init__(self, host, port=None, use_ssl=False): + def __init__(self, host, port=None, use_ssl=False, auth_tok=None): + pass + + def set_auth_token(self, auth_tok): pass def get_image_meta(self, image_id): diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 223e7ae57..5a40f578f 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -31,6 +31,9 @@ class StubGlanceClient(object): self.add_response = add_response self.update_response = update_response + def set_auth_token(self, auth_tok): + pass + def get_image_meta(self, image_id): return self.images[image_id] diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 860cdedd3..879e4b9cb 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -496,8 +496,8 @@ class ComputeTestCase(test.TestCase): db.instance_update(self.context, instance_id, {'instance_type_id': inst_type['id']}) - self.assertRaises(exception.ApiError, self.compute_api.resize, - context, instance_id, 1) + self.assertRaises(exception.CannotResizeToSmallerSize, + self.compute_api.resize, context, instance_id, 1) self.compute.terminate_instance(context, instance_id) @@ -508,8 +508,8 @@ class ComputeTestCase(test.TestCase): self.compute.run_instance(self.context, instance_id) - self.assertRaises(exception.ApiError, self.compute_api.resize, - context, instance_id, 1) + self.assertRaises(exception.CannotResizeToSameSize, + self.compute_api.resize, context, instance_id, 1) self.compute.terminate_instance(context, instance_id) diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 54448f9d6..0c07cbb7c 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -57,7 +57,7 @@ class DbApiTestCase(test.TestCase): def test_instance_get_project_vpn(self): values = {'instance_type_id': FLAGS.default_instance_type, 'image_ref': FLAGS.vpn_image_id, - 'project_id': self.project_id + 'project_id': self.project_id, } instance = db.instance_create(self.context, values) result = db.instance_get_project_vpn(self.context.elevated(), @@ -67,7 +67,7 @@ class DbApiTestCase(test.TestCase): def test_instance_get_project_vpn_joins(self): values = {'instance_type_id': FLAGS.default_instance_type, 'image_ref': FLAGS.vpn_image_id, - 'project_id': self.project_id + 'project_id': self.project_id, } instance = db.instance_create(self.context, values) _setup_networking(instance['id']) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index c5c3151dc..cf25ce215 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -346,7 +346,7 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = 
connection.LibvirtConnection(False) - conn.snapshot(instance_ref, recv_meta['id']) + conn.snapshot(self.context, instance_ref, recv_meta['id']) snapshot = image_service.show(context, recv_meta['id']) self.assertEquals(snapshot['properties']['image_state'], 'available') @@ -386,7 +386,7 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = connection.LibvirtConnection(False) - conn.snapshot(instance_ref, recv_meta['id']) + conn.snapshot(self.context, instance_ref, recv_meta['id']) snapshot = image_service.show(context, recv_meta['id']) self.assertEquals(snapshot['properties']['image_state'], 'available') @@ -736,7 +736,7 @@ class LibvirtConnTestCase(test.TestCase): network_info = _create_network_info() try: - conn.spawn(instance, network_info) + conn.spawn(self.context, instance, network_info) except Exception, e: count = (0 <= str(e.message).find('Unexpected method call')) diff --git a/nova/tests/test_twistd.py b/nova/tests/test_twistd.py deleted file mode 100644 index ff8627c3b..000000000 --- a/nova/tests/test_twistd.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import StringIO -import sys - -from nova import twistd -from nova import exception -from nova import flags -from nova import test - - -FLAGS = flags.FLAGS - - -class TwistdTestCase(test.TestCase): - def setUp(self): - super(TwistdTestCase, self).setUp() - self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions) - sys.stdout = StringIO.StringIO() - - def tearDown(self): - super(TwistdTestCase, self).tearDown() - sys.stdout = sys.__stdout__ - - def test_basic(self): - options = self.Options() - argv = options.parseOptions() - - def test_logfile(self): - options = self.Options() - argv = options.parseOptions(['--logfile=foo']) - self.assertEqual(FLAGS.logfile, 'foo') - - def test_help(self): - options = self.Options() - self.assertRaises(SystemExit, options.parseOptions, ['--help']) - self.assert_('pidfile' in sys.stdout.getvalue()) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 0c359e981..ec5098a37 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime import os import tempfile @@ -306,3 +307,80 @@ class IsUUIDLikeTestCase(test.TestCase): def test_non_uuid_string_passed(self): val = 'foo-fooo' self.assertUUIDLike(val, False) + + +class ToPrimitiveTestCase(test.TestCase): + def test_list(self): + self.assertEquals(utils.to_primitive([1, 2, 3]), [1, 2, 3]) + + def test_empty_list(self): + self.assertEquals(utils.to_primitive([]), []) + + def test_tuple(self): + self.assertEquals(utils.to_primitive((1, 2, 3)), [1, 2, 3]) + + def test_dict(self): + self.assertEquals(utils.to_primitive(dict(a=1, b=2, c=3)), + dict(a=1, b=2, c=3)) + + def test_empty_dict(self): + self.assertEquals(utils.to_primitive({}), {}) + + def test_datetime(self): + x = datetime.datetime(1, 2, 3, 4, 5, 6, 7) + self.assertEquals(utils.to_primitive(x), "0001-02-03 04:05:06.000007") + + def test_iter(self): + class IterClass(object): + def __init__(self): + self.data = [1, 2, 3, 4, 5] + self.index = 0 + + def __iter__(self): + return self + + def next(self): + if self.index == len(self.data): + raise StopIteration + self.index = self.index + 1 + return self.data[self.index - 1] + + x = IterClass() + self.assertEquals(utils.to_primitive(x), [1, 2, 3, 4, 5]) + + def test_iteritems(self): + class IterItemsClass(object): + def __init__(self): + self.data = dict(a=1, b=2, c=3).items() + self.index = 0 + + def __iter__(self): + return self + + def next(self): + if self.index == len(self.data): + raise StopIteration + self.index = self.index + 1 + return self.data[self.index - 1] + + x = IterItemsClass() + ordered = utils.to_primitive(x) + ordered.sort() + self.assertEquals(ordered, [['a', 1], ['b', 2], ['c', 3]]) + + def test_instance(self): + class MysteryClass(object): + a = 10 + + def __init__(self): + self.b = 1 + + x = MysteryClass() + self.assertEquals(utils.to_primitive(x, convert_instances=True), + dict(b=1)) + + self.assertEquals(utils.to_primitive(x), x) + + def test_typeerror(self): + x = bytearray # Class, not instance + self.assertEquals(utils.to_primitive(x), u"<type 'bytearray'>") diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 3d87d67ad..06daf46e8 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -40,6 +40,7 @@ class VMWareAPIVMTestCase(test.TestCase): def setUp(self): super(VMWareAPIVMTestCase, self).setUp() + self.context = context.RequestContext('fake', 'fake', False) self.flags(vmwareapi_host_ip='test_url', vmwareapi_host_username='test_username', vmwareapi_host_password='test_pass') @@ -94,7 +95,7 @@ class VMWareAPIVMTestCase(test.TestCase): """Create and spawn the VM.""" self._create_instance_in_the_db() self.type_data = db.instance_type_get_by_name(None, 'm1.large') - self.conn.spawn(self.instance, self.network_info) + self.conn.spawn(self.context, self.instance, self.network_info) self._check_vm_record() def _check_vm_record(self): @@ -156,14 +157,14 @@ class VMWareAPIVMTestCase(test.TestCase): self._create_vm() info = self.conn.get_info(1) self._check_vm_info(info, power_state.RUNNING) - self.conn.snapshot(self.instance, "Test-Snapshot") + self.conn.snapshot(self.context, self.instance, "Test-Snapshot") info = self.conn.get_info(1) self._check_vm_info(info, power_state.RUNNING) def test_snapshot_non_existent(self): self._create_instance_in_the_db() - self.assertRaises(Exception, self.conn.snapshot, self.instance, - "Test-Snapshot") + self.assertRaises(Exception, self.conn.snapshot, self.context, + self.instance, "Test-Snapshot") def test_reboot(self): self._create_vm() diff 
--git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index d4bca1281..a795b3c74 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -226,7 +226,7 @@ class XenAPIVMTestCase(test.TestCase): 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] instance = db.instance_create(self.context, values) - self.conn.spawn(instance, network_info) + self.conn.spawn(self.context, instance, network_info) gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id) gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id) @@ -256,14 +256,15 @@ class XenAPIVMTestCase(test.TestCase): instance = self._create_instance() name = "MySnapshot" - self.assertRaises(exception.Error, self.conn.snapshot, instance, name) + self.assertRaises(exception.Error, self.conn.snapshot, + self.context, instance, name) def test_instance_snapshot(self): stubs.stubout_instance_snapshot(self.stubs) instance = self._create_instance() name = "MySnapshot" - template_vm_ref = self.conn.snapshot(instance, name) + template_vm_ref = self.conn.snapshot(self.context, instance, name) def ensure_vm_was_torn_down(): vm_labels = [] @@ -425,14 +426,13 @@ class XenAPIVMTestCase(test.TestCase): 'label': 'fake', 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] - self.conn.spawn(instance, network_info) + self.conn.spawn(self.context, instance, network_info) self.create_vm_record(self.conn, os_type, instance_id) self.check_vm_record(self.conn, check_injection) self.assertTrue(instance.os_type) self.assertTrue(instance.architecture) def test_spawn_not_enough_memory(self): - FLAGS.xenapi_image_service = 'glance' self.assertRaises(Exception, self._test_spawn, 1, 2, 3, "4") # m1.xlarge @@ -444,7 +444,6 @@ class XenAPIVMTestCase(test.TestCase): """ vdi_recs_start = self._list_vdis() - FLAGS.xenapi_image_service = 'glance' stubs.stubout_fetch_image_glance_disk(self.stubs) self.assertRaises(xenapi_fake.Failure, self._test_spawn, 1, 2, 3) @@ -459,7 +458,6 @@ class XenAPIVMTestCase(test.TestCase): """ vdi_recs_start = self._list_vdis() - FLAGS.xenapi_image_service = 'glance' stubs.stubout_create_vm(self.stubs) self.assertRaises(xenapi_fake.Failure, self._test_spawn, 1, 2, 3) @@ -467,40 +465,12 @@ class XenAPIVMTestCase(test.TestCase): vdi_recs_end = self._list_vdis() self._check_vdis(vdi_recs_start, vdi_recs_end) - def test_spawn_raw_objectstore(self): - # TODO(vish): deprecated - from nova.auth import manager - authman = manager.AuthManager() - authman.create_user('fake', 'fake') - authman.create_project('fake', 'fake') - try: - FLAGS.xenapi_image_service = 'objectstore' - self._test_spawn(1, None, None) - finally: - authman.delete_project('fake') - authman.delete_user('fake') - - def test_spawn_objectstore(self): - # TODO(vish): deprecated - from nova.auth import manager - authman = manager.AuthManager() - authman.create_user('fake', 'fake') - authman.create_project('fake', 'fake') - try: - FLAGS.xenapi_image_service = 'objectstore' - self._test_spawn(1, 2, 3) - finally: - authman.delete_project('fake') - authman.delete_user('fake') - @stub_vm_utils_with_vdi_attached_here def test_spawn_raw_glance(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None) self.check_vm_params_for_linux() def test_spawn_vhd_glance_linux(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, os_type="linux", architecture="x86-64") self.check_vm_params_for_linux() @@ -529,20 +499,17 @@ class XenAPIVMTestCase(test.TestCase): 
self.assertEqual(len(self.vm['VBDs']), 1) def test_spawn_vhd_glance_windows(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, os_type="windows", architecture="i386") self.check_vm_params_for_windows() def test_spawn_glance(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK) self.check_vm_params_for_linux_with_external_kernel() def test_spawn_netinject_file(self): - FLAGS.xenapi_image_service = 'glance' db_fakes.stub_out_db_instance_api(self.stubs, injected=True) self._tee_executed = False @@ -568,7 +535,6 @@ class XenAPIVMTestCase(test.TestCase): # Capture the sudo tee .../etc/network/interfaces command (r'(sudo\s+)?tee.*interfaces', _tee_handler), ]) - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK, @@ -576,7 +542,6 @@ class XenAPIVMTestCase(test.TestCase): self.assertTrue(self._tee_executed) def test_spawn_netinject_xenstore(self): - FLAGS.xenapi_image_service = 'glance' db_fakes.stub_out_db_instance_api(self.stubs, injected=True) self._tee_executed = False @@ -621,7 +586,7 @@ class XenAPIVMTestCase(test.TestCase): self.assertFalse(self._tee_executed) def test_spawn_vlanmanager(self): - self.flags(xenapi_image_service='glance', + self.flags(image_service='nova.image.glance.GlanceImageService', network_manager='nova.network.manager.VlanManager', vlan_interface='fake0') @@ -665,7 +630,7 @@ class XenAPIVMTestCase(test.TestCase): self.flags(flat_injected=False) instance = self._create_instance() conn = xenapi_conn.get_connection(False) - conn.rescue(instance, None, []) + conn.rescue(self.context, instance, None, []) def test_unrescue(self): instance = self._create_instance() @@ -702,7 +667,7 @@ class XenAPIVMTestCase(test.TestCase): 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] if spawn: - self.conn.spawn(instance, network_info) + self.conn.spawn(self.context, instance, network_info) return instance @@ -812,8 +777,9 @@ class XenAPIMigrateInstance(test.TestCase): 'label': 'fake', 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] - conn.finish_migration(instance, dict(base_copy='hurr', cow='durr'), - network_info, resize_instance=True) + conn.finish_migration(self.context, instance, + dict(base_copy='hurr', cow='durr'), + network_info, resize_instance=True) self.assertEqual(self.called, True) def test_finish_migrate_no_local_storage(self): @@ -844,8 +810,9 @@ class XenAPIMigrateInstance(test.TestCase): 'label': 'fake', 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] - conn.finish_migration(instance, dict(base_copy='hurr', cow='durr'), - network_info, resize_instance=True) + conn.finish_migration(self.context, instance, + dict(base_copy='hurr', cow='durr'), + network_info, resize_instance=True) def test_finish_migrate_no_resize_vdi(self): instance = db.instance_create(self.context, self.values) @@ -874,8 +841,9 @@ class XenAPIMigrateInstance(test.TestCase): 'rxtx_cap': 3})] # Resize instance would be determined by the compute call - conn.finish_migration(instance, dict(base_copy='hurr', cow='durr'), - network_info, resize_instance=False) + conn.finish_migration(self.context, instance, + dict(base_copy='hurr', cow='durr'), + network_info, resize_instance=False) class XenAPIImageTypeTestCase(test.TestCase): @@ -915,7 +883,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): def 
test_instance_disk(self): """If a kernel is specified, the image type is DISK (aka machine).""" - FLAGS.xenapi_image_service = 'objectstore' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL self.assert_disk_type(vm_utils.ImageType.DISK) @@ -925,7 +892,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): If the kernel isn't specified, and we're not using Glance, then DISK_RAW is assumed. """ - FLAGS.xenapi_image_service = 'objectstore' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -935,7 +901,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): If we're using Glance, then defer to the image_type field, which in this case will be 'raw'. """ - FLAGS.xenapi_image_service = 'glance' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -945,7 +910,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): If we're using Glance, then defer to the image_type field, which in this case will be 'vhd'. """ - FLAGS.xenapi_image_service = 'glance' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 66c79d465..0d0f84e32 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -28,8 +28,8 @@ from nova import utils def stubout_instance_snapshot(stubs): @classmethod - def fake_fetch_image(cls, session, instance_id, image, user, project, - type): + def fake_fetch_image(cls, context, session, instance_id, image, user, + project, type): from nova.virt.xenapi.fake import create_vdi name_label = "instance-%s" % instance_id #TODO: create fake SR record @@ -227,7 +227,7 @@ def stub_out_vm_methods(stubs): def fake_release_bootlock(self, vm): pass - def fake_spawn_rescue(self, inst): + def fake_spawn_rescue(self, context, inst, network_info): inst._rescue = False stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown) diff --git a/nova/twistd.py b/nova/twistd.py deleted file mode 100644 index 15cf67825..000000000 --- a/nova/twistd.py +++ /dev/null @@ -1,267 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Twisted daemon helpers, specifically to parse out gFlags from twisted flags, -manage pid files and support syslogging. 
-""" - -import gflags -import os -import signal -import sys -import time -from twisted.scripts import twistd -from twisted.python import log -from twisted.python import reflect -from twisted.python import runtime -from twisted.python import usage - -from nova import flags -from nova import log as logging - - -if runtime.platformType == "win32": - from twisted.scripts._twistw import ServerOptions -else: - from twisted.scripts._twistd_unix import ServerOptions - - -FLAGS = flags.FLAGS - - -class TwistdServerOptions(ServerOptions): - def parseArgs(self, *args): - return - - -class FlagParser(object): - # this is a required attribute for gflags - syntactic_help = '' - - def __init__(self, parser): - self.parser = parser - - def Parse(self, s): - return self.parser(s) - - -def WrapTwistedOptions(wrapped): - class TwistedOptionsToFlags(wrapped): - subCommands = None - - def __init__(self): - # NOTE(termie): _data exists because Twisted stuff expects - # to be able to set arbitrary things that are - # not actual flags - self._data = {} - self._flagHandlers = {} - self._paramHandlers = {} - - # Absorb the twistd flags into our FLAGS - self._absorbFlags() - self._absorbParameters() - self._absorbHandlers() - - wrapped.__init__(self) - - def _absorbFlags(self): - twistd_flags = [] - reflect.accumulateClassList(self.__class__, 'optFlags', - twistd_flags) - for flag in twistd_flags: - key = flag[0].replace('-', '_') - if hasattr(FLAGS, key): - continue - flags.DEFINE_boolean(key, None, str(flag[-1])) - - def _absorbParameters(self): - twistd_params = [] - reflect.accumulateClassList(self.__class__, 'optParameters', - twistd_params) - for param in twistd_params: - key = param[0].replace('-', '_') - if hasattr(FLAGS, key): - continue - if len(param) > 4: - flags.DEFINE(FlagParser(param[4]), - key, param[2], str(param[3]), - serializer=gflags.ArgumentSerializer()) - else: - flags.DEFINE_string(key, param[2], str(param[3])) - - def _absorbHandlers(self): - twistd_handlers = {} - reflect.addMethodNamesToDict(self.__class__, twistd_handlers, - "opt_") - - # NOTE(termie): Much of the following is derived/copied from - # twisted.python.usage with the express purpose of - # providing compatibility - for name in twistd_handlers.keys(): - method = getattr(self, 'opt_' + name) - - takesArg = not usage.flagFunction(method, name) - doc = getattr(method, '__doc__', None) - if not doc: - doc = 'undocumented' - - if not takesArg: - if name not in FLAGS: - flags.DEFINE_boolean(name, None, doc) - self._flagHandlers[name] = method - else: - if name not in FLAGS: - flags.DEFINE_string(name, None, doc) - self._paramHandlers[name] = method - - def _doHandlers(self): - for flag, handler in self._flagHandlers.iteritems(): - if self[flag]: - handler() - for param, handler in self._paramHandlers.iteritems(): - if self[param] is not None: - handler(self[param]) - - def __str__(self): - return str(FLAGS) - - def parseOptions(self, options=None): - if options is None: - options = sys.argv - else: - options.insert(0, '') - - args = FLAGS(options) - logging.setup() - argv = args[1:] - # ignore subcommands - - try: - self.parseArgs(*argv) - except TypeError: - raise usage.UsageError(_("Wrong number of arguments.")) - - self.postOptions() - return args - - def parseArgs(self, *args): - # TODO(termie): figure out a decent way of dealing with args - #return - wrapped.parseArgs(self, *args) - - def postOptions(self): - self._doHandlers() - - wrapped.postOptions(self) - - def __getitem__(self, key): - key = key.replace('-', '_') - try: - 
return getattr(FLAGS, key) - except (AttributeError, KeyError): - return self._data[key] - - def __setitem__(self, key, value): - key = key.replace('-', '_') - try: - return setattr(FLAGS, key, value) - except (AttributeError, KeyError): - self._data[key] = value - - def get(self, key, default): - key = key.replace('-', '_') - try: - return getattr(FLAGS, key) - except (AttributeError, KeyError): - self._data.get(key, default) - - return TwistedOptionsToFlags - - -def stop(pidfile): - """ - Stop the daemon - """ - # Get the pid from the pidfile - try: - pf = file(pidfile, 'r') - pid = int(pf.read().strip()) - pf.close() - except IOError: - pid = None - - if not pid: - message = _("pidfile %s does not exist. Daemon not running?\n") - sys.stderr.write(message % pidfile) - # Not an error in a restart - return - - # Try killing the daemon process - try: - while 1: - os.kill(pid, signal.SIGKILL) - time.sleep(0.1) - except OSError, err: - err = str(err) - if err.find(_("No such process")) > 0: - if os.path.exists(pidfile): - os.remove(pidfile) - else: - print str(err) - sys.exit(1) - - -def serve(filename): - logging.debug(_("Serving %s") % filename) - name = os.path.basename(filename) - OptionsClass = WrapTwistedOptions(TwistdServerOptions) - options = OptionsClass() - argv = options.parseOptions() - FLAGS.python = filename - FLAGS.no_save = True - if not FLAGS.pidfile: - FLAGS.pidfile = '%s.pid' % name - elif FLAGS.pidfile.endswith('twistd.pid'): - FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - if not FLAGS.prefix: - FLAGS.prefix = name - elif FLAGS.prefix.endswith('twisted'): - FLAGS.prefix = FLAGS.prefix.replace('twisted', name) - - action = 'start' - if len(argv) > 1: - action = argv.pop() - - if action == 'stop': - stop(FLAGS.pidfile) - sys.exit() - elif action == 'restart': - stop(FLAGS.pidfile) - elif action == 'start': - pass - else: - print 'usage: %s [options] [start|stop|restart]' % argv[0] - sys.exit(1) - - logging.debug(_("Full set of FLAGS:")) - for flag in FLAGS: - logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) - - logging.audit(_("Starting %s"), name) - twistd.runApp(options) diff --git a/nova/utils.py b/nova/utils.py index 737903f81..4ea623cc1 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -513,25 +513,61 @@ def utf8(value): return value -def to_primitive(value): - if type(value) is type([]) or type(value) is type((None,)): - o = [] - for v in value: - o.append(to_primitive(v)) - return o - elif type(value) is type({}): - o = {} - for k, v in value.iteritems(): - o[k] = to_primitive(v) - return o - elif isinstance(value, datetime.datetime): - return str(value) - elif hasattr(value, 'iteritems'): - return to_primitive(dict(value.iteritems())) - elif hasattr(value, '__iter__'): - return to_primitive(list(value)) - else: - return value +def to_primitive(value, convert_instances=False, level=0): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + if inspect.isclass(value): + return unicode(value) + + if level > 3: + return [] + + # The try block may not be necessary after the class check above, + # but just in case ... 
+ try: + if type(value) is type([]) or type(value) is type((None,)): + o = [] + for v in value: + o.append(to_primitive(v, convert_instances=convert_instances, + level=level)) + return o + elif type(value) is type({}): + o = {} + for k, v in value.iteritems(): + o[k] = to_primitive(v, convert_instances=convert_instances, + level=level) + return o + elif isinstance(value, datetime.datetime): + return str(value) + elif hasattr(value, 'iteritems'): + return to_primitive(dict(value.iteritems()), + convert_instances=convert_instances, + level=level) + elif hasattr(value, '__iter__'): + return to_primitive(list(value), level) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars. + return to_primitive(value.__dict__, + convert_instances=convert_instances, + level=level + 1) + else: + return value + except TypeError, e: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return unicode(value) def dumps(value): diff --git a/nova/virt/driver.py b/nova/virt/driver.py index b219fb2cb..4f3cfefad 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -40,6 +40,7 @@ class ComputeDriver(object): def init_host(self, host): """Adopt existing VM's running here""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_info(self, instance_name): @@ -52,16 +53,20 @@ class ComputeDriver(object): :num_cpu: (int) the number of virtual CPUs for the domain :cpu_time: (int) the CPU time used in nanoseconds """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def list_instances(self): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def list_instances_detail(self): """Return a list of InstanceInfo for all registered VMs""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, network_info, + block_device_mapping=None): """Launch a VM for the specified instance""" raise NotImplementedError() @@ -79,29 +84,36 @@ class ComputeDriver(object): warning in that case. 
""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def reboot(self, instance, network_info): """Reboot specified VM""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def snapshot_instance(self, context, instance_id, image_id): raise NotImplementedError() def get_console_pool_info(self, console_type): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_console_output(self, instance): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_ajax_console(self, instance): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_diagnostics(self, instance): """Return data about VM diagnostics""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_host_ip_addr(self): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def attach_volume(self, context, instance_id, volume_id, mountpoint): @@ -116,43 +128,50 @@ class ComputeDriver(object): def migrate_disk_and_power_off(self, instance, dest): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def snapshot(self, instance, image_id): + def snapshot(self, context, instance, image_id): """Create snapshot from a running VM instance.""" raise NotImplementedError() - def finish_migration(self, instance, disk_info, network_info, + def finish_migration(self, context, instance, disk_info, network_info, resize_instance): """Completes a resize, turning on the migrated instance""" raise NotImplementedError() def revert_migration(self, instance): """Reverts a resize, powering back on the instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def pause(self, instance, callback): """Pause VM instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unpause(self, instance, callback): """Unpause paused VM instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def suspend(self, instance, callback): """suspend the specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def resume(self, instance, callback): """resume the specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, callback, network_info): """Rescue the specified instance""" raise NotImplementedError() def unrescue(self, instance, callback, network_info): """Unrescue the specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def update_available_resource(self, ctxt, host): @@ -165,6 +184,7 @@ class ComputeDriver(object): :param host: hostname that compute manager is currently running """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def live_migration(self, ctxt, instance_ref, dest, @@ -184,20 +204,25 @@ class ComputeDriver(object): expected nova.compute.manager.recover_live_migration. 
""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_security_group_rules(self, security_group_id): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_security_group_members(self, security_group_id): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_provider_fw_rules(self, security_group_id): """See: nova/virt/fake.py for docs.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def reset_network(self, instance): """reset networking for specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token pass def ensure_filtering_rules_for_instance(self, instance_ref): @@ -223,10 +248,12 @@ class ComputeDriver(object): :params instance_ref: nova.db.sqlalchemy.models.Instance object """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unfilter_instance(self, instance, network_info): """Stop filtering instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def set_admin_password(self, context, instance_id, new_pass=None): @@ -237,24 +264,30 @@ class ComputeDriver(object): """Create a file on the VM instance. The file path and contents should be base64-encoded. """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def agent_update(self, instance, url, md5hash): """Update agent on the VM instance.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def inject_network_info(self, instance, nw_info): """inject network info for specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token pass def poll_rescued_instances(self, timeout): """Poll for rescued instances""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def plug_vifs(self, instance, network_info): """Plugs in VIFs to networks.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 26bc421c0..80abcc644 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -129,7 +129,8 @@ class FakeConnection(driver.ComputeDriver): info_list.append(self._map_to_instance_info(instance)) return info_list - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, network_info, + block_device_mapping=None): """ Create a new instance/VM/domain on the virtualization platform. @@ -153,7 +154,7 @@ class FakeConnection(driver.ComputeDriver): fake_instance = FakeInstance(name, state) self.instances[name] = fake_instance - def snapshot(self, instance, name): + def snapshot(self, context, instance, name): """ Snapshots the specified instance. @@ -240,7 +241,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, callback, network_info): """ Rescue the specified instance. """ @@ -340,8 +341,7 @@ class FakeConnection(driver.ComputeDriver): only useful for giving back to this layer as a parameter to disk_stats). These IDs only need to be unique for a given instance. 
- Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return ['A_DISK'] @@ -353,8 +353,7 @@ class FakeConnection(driver.ComputeDriver): interface_stats). These IDs only need to be unique for a given instance. - Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return ['A_VIF'] @@ -374,8 +373,7 @@ class FakeConnection(driver.ComputeDriver): having to do the aggregation. On those platforms, this method is unused. - Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return [0L, 0L, 0L, 0L, None] @@ -395,8 +393,7 @@ class FakeConnection(driver.ComputeDriver): having to do the aggregation. On those platforms, this method is unused. - Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index c26fe108b..3428a7fc1 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -138,7 +138,8 @@ class HyperVConnection(driver.ComputeDriver): return instance_infos - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, network_info, + block_device_mapping=None): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: diff --git a/nova/virt/images.py b/nova/virt/images.py index 2e9fca3d6..54c691a40 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -21,7 +21,6 @@ Handling of VM disk images. """ -from nova import context from nova import flags from nova.image import glance as glance_image_service import nova.image @@ -33,13 +32,12 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.virt.images') -def fetch(image_href, path, _user_id, _project_id): +def fetch(context, image_href, path, _user_id, _project_id): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. 
(image_service, image_id) = nova.image.get_image_service(image_href) with open(path, "wb") as image_file: - elevated = context.get_admin_context() - metadata = image_service.get(elevated, image_id, image_file) + metadata = image_service.get(context, image_id, image_file) return metadata diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 17c328a83..0acf25d28 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -54,7 +54,7 @@ from xml.etree import ElementTree from eventlet import greenthread from eventlet import tpool -from nova import context +from nova import context as nova_context from nova import db from nova import exception from nova import flags @@ -174,7 +174,7 @@ class LibvirtConnection(driver.ComputeDriver): def init_host(self, host): # Adopt existing VM's running here - ctxt = context.get_admin_context() + ctxt = nova_context.get_admin_context() for instance in db.instance_get_all_by_host(ctxt, host): try: LOG.debug(_('Checking state of %s'), instance['name']) @@ -396,7 +396,7 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom.detachDevice(xml) @exception.wrap_exception() - def snapshot(self, instance, image_href): + def snapshot(self, context, instance, image_href): """Create snapshot from a running VM instance. This command only works with qemu 0.14+, the qemu_img flag is @@ -405,14 +405,13 @@ class LibvirtConnection(driver.ComputeDriver): """ virt_dom = self._lookup_by_name(instance['name']) - elevated = context.get_admin_context() (image_service, image_id) = nova.image.get_image_service( instance['image_ref']) - base = image_service.show(elevated, image_id) + base = image_service.show(context, image_id) (snapshot_image_service, snapshot_image_id) = \ nova.image.get_image_service(image_href) - snapshot = snapshot_image_service.show(elevated, snapshot_image_id) + snapshot = snapshot_image_service.show(context, snapshot_image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], @@ -463,7 +462,7 @@ class LibvirtConnection(driver.ComputeDriver): # Upload that image to the image service with open(out_path) as image_file: - image_service.update(elevated, + image_service.update(context, image_href, metadata, image_file) @@ -538,7 +537,7 @@ class LibvirtConnection(driver.ComputeDriver): dom.create() @exception.wrap_exception() - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, callback, network_info): """Loads a VM using rescue images. 
A rescue is normally performed when something goes wrong with the @@ -553,7 +552,7 @@ class LibvirtConnection(driver.ComputeDriver): rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, '.rescue', rescue_images) + self._create_image(context, instance, xml, '.rescue', rescue_images) self._create_new_domain(xml) def _wait_for_rescue(): @@ -592,13 +591,14 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, network_info, + block_device_mapping=None): xml = self.to_xml(instance, False, network_info=network_info, block_device_mapping=block_device_mapping) block_device_mapping = block_device_mapping or [] self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) - self._create_image(instance, xml, network_info=network_info, + self._create_image(context, instance, xml, network_info=network_info, block_device_mapping=block_device_mapping) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) @@ -769,9 +769,10 @@ class LibvirtConnection(driver.ComputeDriver): else: utils.execute('cp', base, target) - def _fetch_image(self, target, image_id, user_id, project_id, size=None): + def _fetch_image(self, context, target, image_id, user_id, project_id, + size=None): """Grab image and optionally attempt to resize it""" - images.fetch(image_id, target, user_id, project_id) + images.fetch(context, image_id, target, user_id, project_id) if size: disk.extend(target, size) @@ -780,8 +781,9 @@ class LibvirtConnection(driver.ComputeDriver): utils.execute('truncate', target, '-s', "%dG" % local_gb) # TODO(vish): should we format disk by default? 
- def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, - network_info=None, block_device_mapping=None): + def _create_image(self, context, inst, libvirt_xml, suffix='', + disk_images=None, network_info=None, + block_device_mapping=None): block_device_mapping = block_device_mapping or [] if not suffix: @@ -817,6 +819,7 @@ class LibvirtConnection(driver.ComputeDriver): if disk_images['kernel_id']: fname = '%08x' % int(disk_images['kernel_id']) self._cache_image(fn=self._fetch_image, + context=context, target=basepath('kernel'), fname=fname, image_id=disk_images['kernel_id'], @@ -825,6 +828,7 @@ class LibvirtConnection(driver.ComputeDriver): if disk_images['ramdisk_id']: fname = '%08x' % int(disk_images['ramdisk_id']) self._cache_image(fn=self._fetch_image, + context=context, target=basepath('ramdisk'), fname=fname, image_id=disk_images['ramdisk_id'], @@ -843,6 +847,7 @@ class LibvirtConnection(driver.ComputeDriver): if not self._volume_in_mapping(self.root_mount_device, block_device_mapping): self._cache_image(fn=self._fetch_image, + context=context, target=basepath('disk'), fname=root_fname, cow=FLAGS.use_cow_images, @@ -879,7 +884,7 @@ class LibvirtConnection(driver.ComputeDriver): ifc_template = open(FLAGS.injected_network_template).read() ifc_num = -1 have_injected_networks = False - admin_context = context.get_admin_context() + admin_context = nova_context.get_admin_context() for (network_ref, mapping) in network_info: ifc_num += 1 @@ -1087,8 +1092,7 @@ class LibvirtConnection(driver.ComputeDriver): def get_disks(self, instance_name): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. Returns a list of all block devices for this domain. """ @@ -1129,8 +1133,7 @@ class LibvirtConnection(driver.ComputeDriver): def get_interfaces(self, instance_name): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. Returns a list of all network interfaces for this instance. """ @@ -1345,16 +1348,14 @@ class LibvirtConnection(driver.ComputeDriver): def block_stats(self, instance_name, disk): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. """ domain = self._lookup_by_name(instance_name) return domain.blockStats(disk) def interface_stats(self, instance_name, interface): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. """ domain = self._lookup_by_name(instance_name) return domain.interfaceStats(interface) diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 1ee8fa1c0..07a6ba6ab 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -26,7 +26,7 @@ import urllib import urllib2
import uuid
-from nova import context
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
@@ -89,7 +89,7 @@ class VMWareVMOps(object):
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, instance, network_info):
+ def spawn(self, context, instance, network_info):
"""
Creates a VM instance.
@@ -111,7 +111,7 @@ class VMWareVMOps(object):
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
net_name = network['bridge']
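
The hunks above alias the nova.context module import to nova_context. The patch does not say why, but the accompanying signature changes imply the reason: these methods now receive a per-request context argument, which would otherwise shadow the module and make get_admin_context() unreachable. A minimal, hedged sketch of the resulting pattern (the function body is illustrative, not code from this patch):

from nova import context as nova_context


def spawn(context, instance, network_info):
    # 'context' here is the caller's RequestContext (user-scoped); the
    # aliased module still provides the elevated admin context needed for
    # privileged DB lookups such as network_get_by_instance().
    admin_ctxt = nova_context.get_admin_context()
    return admin_ctxt, context, instance, network_info
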
@@ -329,7 +329,7 @@ class VMWareVMOps(object):
LOG.debug(_("Powered on the VM instance %s") % instance.name)
_power_on_vm()
- def snapshot(self, instance, snapshot_name):
+ def snapshot(self, context, instance, snapshot_name):
"""
Create snapshot from a running VM instance.
Steps followed are:
@@ -721,11 +721,11 @@ class VMWareVMOps(object):
Set the machine id of the VM for guest tools to pick up and change
the IP.
"""
- admin_context = context.get_admin_context()
+ admin_context = nova_context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
mac_address = None
if instance['mac_addresses']:
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index ce57847b2..3d209fa99 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,13 +124,14 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
"""Create VM instance."""
- self._vmops.spawn(instance, network_info)
+ self._vmops.spawn(context, instance, network_info)
- def snapshot(self, instance, name):
+ def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
- self._vmops.snapshot(instance, name)
+ self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info):
"""Reboot VM instance."""
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c9bcb801c..63bc191cf 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -342,7 +342,7 @@ class VMHelper(HelperBase): return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) @classmethod - def upload_image(cls, session, instance, vdi_uuids, image_id): + def upload_image(cls, context, session, instance, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. """ @@ -360,42 +360,30 @@ class VMHelper(HelperBase): 'glance_host': glance_host, 'glance_port': glance_port, 'sr_path': cls.get_sr_path(session), - 'os_type': os_type} + 'os_type': os_type, + 'auth_token': getattr(context, 'auth_token', None)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) session.wait_for_task(task, instance.id) @classmethod - def fetch_image(cls, session, instance_id, image, user_id, project_id, - image_type): - """ - image_type is interpreted as an ImageType instance - Related flags: - xenapi_image_service = ['glance', 'objectstore'] - glance_address = 'address for glance services' - glance_port = 'port for glance services' + def fetch_image(cls, context, session, instance_id, image, user_id, + project_id, image_type): + """Fetch image from glance based on image type. - Returns: A single filename if image_type is KERNEL_RAMDISK + Returns: A single filename if image_type is KERNEL or RAMDISK A list of dictionaries that describe VDIs, otherwise """ - - if FLAGS.xenapi_image_service == 'glance': - return cls._fetch_image_glance(session, instance_id, - image, image_type) + if image_type == ImageType.DISK_VHD: + return cls._fetch_image_glance_vhd(context, + session, instance_id, image, image_type) else: - # TODO(vish): this shouldn't be used anywhere anymore and - # can probably be removed - from nova.auth.manager import AuthManager - manager = AuthManager() - access = manager.get_access_key(user_id, project_id) - secret = manager.get_user(user_id).secret - return cls._fetch_image_objectstore(session, instance_id, image, - access, secret, - image_type) + return cls._fetch_image_glance_disk(context, + session, instance_id, image, image_type) @classmethod - def _fetch_image_glance_vhd(cls, session, instance_id, image, + def _fetch_image_glance_vhd(cls, context, session, instance_id, image, image_type): """Tell glance to download an image and put the VHDs into the SR @@ -417,7 +405,8 @@ class VMHelper(HelperBase): 'glance_host': glance_host, 'glance_port': glance_port, 'uuid_stack': uuid_stack, - 'sr_path': cls.get_sr_path(session)} + 'sr_path': cls.get_sr_path(session), + 'auth_token': getattr(context, 'auth_token', None)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) @@ -443,7 +432,7 @@ class VMHelper(HelperBase): return vdis @classmethod - def _fetch_image_glance_disk(cls, session, instance_id, image, + def _fetch_image_glance_disk(cls, context, session, instance_id, image, image_type): """Fetch the image from Glance @@ -463,6 +452,7 @@ class VMHelper(HelperBase): sr_ref = safe_find_sr(session) glance_client, image_id = nova.image.get_glance_client(image) + glance_client.set_auth_token(getattr(context, 'auth_token', None)) meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) vdi_size = virtual_size @@ -566,135 +556,38 @@ class VMHelper(HelperBase): else: return 
ImageType.DISK_RAW - # FIXME(sirp): can we unify the ImageService and xenapi_image_service - # abstractions? - if FLAGS.xenapi_image_service == 'glance': - image_type = determine_from_glance() - else: - image_type = determine_from_instance() + image_type = determine_from_glance() log_disk_format(image_type) return image_type @classmethod - def _fetch_image_glance(cls, session, instance_id, image, image_type): - """Fetch image from glance based on image type. - - Returns: A single filename if image_type is KERNEL or RAMDISK - A list of dictionaries that describe VDIs, otherwise - """ - if image_type == ImageType.DISK_VHD: - return cls._fetch_image_glance_vhd( - session, instance_id, image, image_type) - else: - return cls._fetch_image_glance_disk( - session, instance_id, image, image_type) - - @classmethod - def _fetch_image_objectstore(cls, session, instance_id, image, access, - secret, image_type): - """Fetch an image from objectstore. - - Returns: A single filename if image_type is KERNEL or RAMDISK - A list of dictionaries that describe VDIs, otherwise - """ - url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, - image) - LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) - if image_type in (ImageType.KERNEL, ImageType.RAMDISK): - fn = 'get_kernel' - else: - fn = 'get_vdi' - args = {} - args['src_url'] = url - args['username'] = access - args['password'] = secret - args['add_partition'] = 'false' - args['raw'] = 'false' - if not image_type in (ImageType.KERNEL, ImageType.RAMDISK): - args['add_partition'] = 'true' - if image_type == ImageType.DISK_RAW: - args['raw'] = 'true' - task = session.async_call_plugin('objectstore', fn, args) - vdi_uuid = None - filename = None - if image_type in (ImageType.KERNEL, ImageType.RAMDISK): - filename = session.wait_for_task(task, instance_id) - else: - vdi_uuid = session.wait_for_task(task, instance_id) - return [dict(vdi_type=ImageType.to_string(image_type), - vdi_uuid=vdi_uuid, - file=filename)] - - @classmethod def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, os_type): """ Determine whether the VM will use a paravirtualized kernel or if it will use hardware virtualization. - 1. Objectstore (any image type): - We use plugin to figure out whether the VDI uses PV - - 2. Glance (VHD): then we use `os_type`, raise if not set - - 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is - available - - 4. Glance (DISK): pv is assumed - """ - if FLAGS.xenapi_image_service == 'glance': - # 2, 3, 4: Glance - return cls._determine_is_pv_glance( - session, vdi_ref, disk_image_type, os_type) - else: - # 1. Objecstore - return cls._determine_is_pv_objectstore(session, instance_id, - vdi_ref) - - @classmethod - def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref): - LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) - fn = "is_vdi_pv" - args = {} - args['vdi-ref'] = vdi_ref - task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(task, instance_id) - pv = None - if pv_str.lower() == 'true': - pv = True - elif pv_str.lower() == 'false': - pv = False - LOG.debug(_("PV Kernel in VDI:%s"), pv) - return pv - - @classmethod - def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type, - os_type): - """ - For a Glance image, determine if we need paravirtualization. - - The relevant scenarios are: - 2. Glance (VHD): then we use `os_type`, raise if not set + 1. Glance (VHD): then we use `os_type`, raise if not set - 3. 
Glance (DISK_RAW): use Pygrub to figure out if pv kernel is + 2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is available - 4. Glance (DISK): pv is assumed + 3. Glance (DISK): pv is assumed """ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) if disk_image_type == ImageType.DISK_VHD: - # 2. VHD + # 1. VHD if os_type == 'windows': is_pv = False else: is_pv = True elif disk_image_type == ImageType.DISK_RAW: - # 3. RAW + # 2. RAW is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) elif disk_image_type == ImageType.DISK: - # 4. Disk + # 3. Disk is_pv = True else: raise exception.Error(_("Unknown image format %(disk_image_type)s") diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 7e02e1def..b3b812a48 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -30,7 +30,7 @@ import sys import time import uuid -from nova import context +from nova import context as nova_context from nova import db from nova import exception from nova import flags @@ -113,11 +113,11 @@ class VMOps(object): vm_ref = VMHelper.lookup(self._session, instance.name) self._start(instance, vm_ref) - def finish_migration(self, instance, disk_info, network_info, + def finish_migration(self, context, instance, disk_info, network_info, resize_instance): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) - vm_ref = self._create_vm(instance, + vm_ref = self._create_vm(context, instance, [dict(vdi_type='os', vdi_uuid=vdi_uuid)], network_info) if resize_instance: @@ -134,19 +134,19 @@ class VMOps(object): LOG.debug(_("Starting instance %s"), instance.name) self._session.call_xenapi('VM.start', vm_ref, False, False) - def _create_disks(self, instance): + def _create_disks(self, context, instance): disk_image_type = VMHelper.determine_disk_image_type(instance) - vdis = VMHelper.fetch_image(self._session, + vdis = VMHelper.fetch_image(context, self._session, instance.id, instance.image_ref, instance.user_id, instance.project_id, disk_image_type) return vdis - def spawn(self, instance, network_info): + def spawn(self, context, instance, network_info): vdis = None try: - vdis = self._create_disks(instance) - vm_ref = self._create_vm(instance, vdis, network_info) + vdis = self._create_disks(context, instance) + vm_ref = self._create_vm(context, instance, vdis, network_info) self._spawn(instance, vm_ref) except (self.XenAPI.Failure, OSError, IOError) as spawn_error: LOG.exception(_("instance %s: Failed to spawn"), @@ -156,11 +156,11 @@ class VMOps(object): self._handle_spawn_error(vdis, spawn_error) raise spawn_error - def spawn_rescue(self, instance): + def spawn_rescue(self, context, instance, network_info): """Spawn a rescue instance.""" - self.spawn(instance) + self.spawn(context, instance, network_info) - def _create_vm(self, instance, vdis, network_info): + def _create_vm(self, context, instance, vdis, network_info): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -171,7 +171,7 @@ class VMOps(object): if not VMHelper.ensure_free_mem(self._session, instance): LOG.exception(_('instance %(instance_name)s: not enough free ' 'memory') % locals()) - db.instance_set_state(context.get_admin_context(), + db.instance_set_state(nova_context.get_admin_context(), instance['id'], power_state.SHUTDOWN) return @@ -181,12 +181,12 @@ class VMOps(object): ramdisk = None try: if instance.kernel_id: - kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, 
instance.user_id, + kernel = VMHelper.fetch_image(context, self._session, + instance.id, instance.kernel_id, instance.user_id, instance.project_id, ImageType.KERNEL)[0] if instance.ramdisk_id: - ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, instance.user_id, + ramdisk = VMHelper.fetch_image(context, self._session, + instance.id, instance.kernel_id, instance.user_id, instance.project_id, ImageType.RAMDISK)[0] # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', @@ -206,7 +206,7 @@ class VMOps(object): if instance.vm_mode != vm_mode: # Update database with normalized (or determined) value - db.instance_update(context.get_admin_context(), + db.instance_update(nova_context.get_admin_context(), instance['id'], {'vm_mode': vm_mode}) vm_ref = VMHelper.create_vm(self._session, instance, kernel and kernel.get('file', None) or None, @@ -268,7 +268,7 @@ class VMOps(object): LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.') % locals()) - ctx = context.get_admin_context() + ctx = nova_context.get_admin_context() agent_build = db.agent_build_get_by_triple(ctx, 'xen', instance.os_type, instance.architecture) if agent_build: @@ -412,7 +412,7 @@ class VMOps(object): # if instance_or_vm is an int/long it must be instance id elif isinstance(instance_or_vm, (int, long)): - ctx = context.get_admin_context() + ctx = nova_context.get_admin_context() instance_obj = db.instance_get(ctx, instance_or_vm) instance_name = instance_obj.name else: @@ -437,9 +437,10 @@ class VMOps(object): vm, "start") - def snapshot(self, instance, image_id): + def snapshot(self, context, instance, image_id): """Create snapshot from a running VM instance. + :param context: request context :param instance: instance to be snapshotted :param image_id: id of image to upload to @@ -464,7 +465,7 @@ class VMOps(object): try: template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) # call plugin to ship snapshot off to glance - VMHelper.upload_image( + VMHelper.upload_image(context, self._session, instance, template_vdi_uuids, image_id) finally: if template_vm_ref: @@ -685,7 +686,7 @@ class VMOps(object): # Successful return code from password is '0' if resp_dict['returncode'] != '0': raise RuntimeError(resp_dict['message']) - db.instance_update(context.get_admin_context(), + db.instance_update(nova_context.get_admin_context(), instance['id'], dict(admin_pass=new_pass)) return resp_dict['message'] @@ -913,7 +914,7 @@ class VMOps(object): True) self._wait_with_callback(instance.id, task, callback) - def rescue(self, instance, callback): + def rescue(self, context, instance, callback, network_info): """Rescue the specified instance. - shutdown the instance VM. @@ -931,7 +932,7 @@ class VMOps(object): self._shutdown(instance, vm_ref) self._acquire_bootlock(vm_ref) instance._rescue = True - self.spawn_rescue(instance) + self.spawn_rescue(context, instance, network_info) rescue_vm_ref = VMHelper.lookup(self._session, instance.name) vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index cc18ed83c..39afbd650 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -101,9 +101,6 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). 
Used only if ' 'connection_type=xenapi.') -flags.DEFINE_string('xenapi_image_service', - 'glance', - 'Where to get VM images: glance or objectstore.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, 'The interval used for polling of coalescing vhds.' @@ -187,23 +184,24 @@ class XenAPIConnection(driver.ComputeDriver): def list_instances_detail(self): return self._vmops.list_instances_detail() - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, network_info, + block_device_mapping=None): """Create VM instance""" - self._vmops.spawn(instance, network_info) + self._vmops.spawn(context, instance, network_info) def revert_migration(self, instance): """Reverts a resize, powering back on the instance""" self._vmops.revert_resize(instance) - def finish_migration(self, instance, disk_info, network_info, + def finish_migration(self, context, instance, disk_info, network_info, resize_instance=False): """Completes a resize, turning on the migrated instance""" - self._vmops.finish_migration(instance, disk_info, network_info, - resize_instance) + self._vmops.finish_migration(context, instance, disk_info, + network_info, resize_instance) - def snapshot(self, instance, image_id): + def snapshot(self, context, instance, image_id): """ Create snapshot from a running VM instance """ - self._vmops.snapshot(instance, image_id) + self._vmops.snapshot(context, instance, image_id) def reboot(self, instance, network_info): """Reboot VM instance""" @@ -244,9 +242,9 @@ class XenAPIConnection(driver.ComputeDriver): """resume the specified instance""" self._vmops.resume(instance, callback) - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, callback, network_info): """Rescue the specified instance""" - self._vmops.rescue(instance, callback) + self._vmops.rescue(context, instance, callback, network_info) def unrescue(self, instance, callback, network_info): """Unrescue the specified instance""" diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index fbe080b22..86e837849 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -67,12 +67,17 @@ def _copy_kernel_vdi(dest, copy_args): def _download_tarball(sr_path, staging_path, image_id, glance_host, - glance_port): + glance_port, auth_token): """Download the tarball image from Glance and extract it into the staging area. """ + # Build request headers + headers = {} + if auth_token: + headers['x-auth-token'] = auth_token + conn = httplib.HTTPConnection(glance_host, glance_port) - conn.request('GET', '/v1/images/%s' % image_id) + conn.request('GET', '/v1/images/%s' % image_id, headers=headers) resp = conn.getresponse() if resp.status == httplib.NOT_FOUND: raise Exception("Image '%s' not found in Glance" % image_id) @@ -236,7 +241,8 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): os.link(source, link_name) -def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): +def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type, + auth_token): """ Create a tarball of the image and then stream that into Glance using chunked-transfer-encoded HTTP. 
@@ -263,6 +269,10 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): 'x-image-meta-container-format': 'ovf', 'x-image-meta-property-os-type': os_type} + # If we have an auth_token, set an x-auth-token header + if auth_token: + headers['x-auth-token'] = auth_token + for header, value in headers.iteritems(): conn.putheader(header, value) conn.endheaders() @@ -364,11 +374,12 @@ def download_vhd(session, args): glance_port = params["glance_port"] uuid_stack = params["uuid_stack"] sr_path = params["sr_path"] + auth_token = params["auth_token"] staging_path = _make_staging_area(sr_path) try: _download_tarball(sr_path, staging_path, image_id, glance_host, - glance_port) + glance_port, auth_token) # Right now, it's easier to return a single string via XenAPI, # so we'll json encode the list of VHDs. return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack)) @@ -386,12 +397,13 @@ def upload_vhd(session, args): glance_port = params["glance_port"] sr_path = params["sr_path"] os_type = params["os_type"] + auth_token = params["auth_token"] staging_path = _make_staging_area(sr_path) try: _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) _upload_tarball(staging_path, image_id, glance_host, glance_port, - os_type) + os_type, auth_token) finally: _cleanup_staging_area(staging_path) @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. 
Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2265,10 +2210,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -131,33 +131,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." 
-msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Kein passender Prozess gefunden" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Bedient %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Alle vorhandenen FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "%s wird gestartet" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1785,34 +1758,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2270,10 +2215,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/en_AU.po b/po/en_AU.po index e53f9fc07..3fa62c006 100644 --- a/po/en_AU.po +++ b/po/en_AU.po @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/en_GB.po b/po/en_GB.po index 601f6170b..b204c93a1 100644 --- a/po/en_GB.po +++ b/po/en_GB.po @@ -130,33 +130,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Wrong number of arguments." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s does not exist. Daemon not running?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "No such process" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Serving %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Full set of FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Starting %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1803,34 +1776,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2288,10 +2233,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -130,33 +130,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Cantidad de argumentos incorrecta" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "El \"pidfile\" %s no existe. 
Quizás el servicio no este corriendo.\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "No existe el proceso" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Sirviendo %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de opciones (FLAGS):" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Iniciando %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1819,34 +1792,6 @@ msgstr "" msgid "Got exception: %s" msgstr "Obtenida excepción %s" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "actualizando %s..." - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "error inesperado durante la actualización" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "excepción inexperada al obtener la conexión" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "Encontrada interfaz: %s" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2309,10 +2254,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -133,35 +133,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Nombre d'arguments incorrect." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" -"Le fichier pid %s n'existe pas. Est-ce que le processus est en cours " -"d'exécution ?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Aucun processus de ce type" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "En train de servir %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Ensemble de propriétés complet :" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Démarrage de %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1865,34 +1836,6 @@ msgstr "Tâche [%(name)s] %(task)s état : %(status)s %(error_info)s" msgid "Got exception: %s" msgstr "Reçu exception : %s" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "mise à jour %s..." 
- -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "erreur inopinée pendant la ise à jour" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "Ne peut pas récupérer blockstats pour \"%(disk)s\" sur \"%(iid)s\"" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "Ne peut pas récupérer ifstats pour \"%(interface)s\" sur \"%(iid)s\"" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "erreur inopinée pendant la connexion" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "Instance trouvée : %s" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2373,10 +2316,6 @@ msgstr "Démarrage %(arg0)s sur %(host)s:%(port)s" msgid "You must implement __call__" msgstr "Vous devez implémenter __call__" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "Démarrage du superviseur d'instance" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "Allocation IP" @@ -134,34 +134,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Numero errato di argomenti" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" -"Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Nessun processo trovato" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Servire %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Insieme di FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Avvio di %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1791,34 +1763,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2278,10 +2222,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -130,33 +130,6 @@ msgstr "例外: compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "例外: compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "引数の数が異なります。" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. 
Daemon not running?\n" -msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "そのようなプロセスはありません" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "%s サービスの開始" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "FLAGSの一覧:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "%s を起動中" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1808,34 +1781,6 @@ msgstr "タスク [%(name)s] %(task)s 状態: %(status)s %(error_info)s" msgid "Got exception: %s" msgstr "例外 %s が発生しました。" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "%s の情報の更新…" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "更新の最中に予期しないエラーが発生しました。" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "\"%(iid)s\" 上の \"%(disk)s\" 用のブロック統計(blockstats)が取得できません" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "\"%(iid)s\" 上の %(interface)s\" 用インターフェース統計(ifstats)が取得できません" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "接続に際し予期しないエラーが発生しました。" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "インスタンス %s が見つかりました。" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2311,10 +2256,6 @@ msgstr "%(host)s:%(port)s 上で %(arg0)s を開始しています" msgid "You must implement __call__" msgstr "__call__ を実装しなければなりません" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "インスタンスモニタを開始しています" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "IP アドレスをリースしました" diff --git a/po/nova.pot b/po/nova.pot index 58140302d..e180ed750 100644 --- a/po/nova.pot +++ b/po/nova.pot @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/pt_BR.po b/po/pt_BR.po index f067a69e0..b3aefce44 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -126,34 +126,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Número errado de argumentos." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" -"Arquivo do id do processo (pidfile) %s não existe. O Daemon está parado?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Processo inexistente" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Servindo %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Iniciando %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1804,34 +1776,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2290,10 +2234,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Неверное число аргументов." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s не обнаружен. 
Демон не запущен?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Запускается %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1779,34 +1752,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "обновление %s..." - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "неожиданная ошибка во время обновления" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2264,10 +2209,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2265,10 +2210,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." 
-msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Обслуговування %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Запускається %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/zh_CN.po b/po/zh_CN.po index c3d292a93..d0ddcd2f7 100644 --- a/po/zh_CN.po +++ b/po/zh_CN.po @@ -17,11 +17,6 @@ msgstr "" "X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" "X-Generator: Launchpad (build 13405)\n" -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "启动 %s 中" - #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 #: ../nova/scheduler/simple.py:122 @@ -135,28 +130,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "错误参数个数。" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s 不存在,守护进程是否运行?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "没有该进程" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "正在为 %s 服务" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "FLAGS全集:" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1785,34 +1758,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2270,10 +2215,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/zh_TW.po b/po/zh_TW.po index ad14c0e32..896e69618 100644 --- a/po/zh_TW.po +++ b/po/zh_TW.po @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s 不存在. Daemon未啟動?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "沒有此一程序" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "正在啟動 %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -124,7 +124,6 @@ setup(name='nova', 'bin/nova-dhcpbridge', 'bin/nova-direct-api', 'bin/nova-import-canonical-imagestore', - 'bin/nova-instancemonitor', 'bin/nova-logspool', 'bin/nova-manage', 'bin/nova-network', diff --git a/tools/eventlet-patch b/tools/eventlet-patch deleted file mode 100644 index c87c5f279..000000000 --- a/tools/eventlet-patch +++ /dev/null @@ -1,24 +0,0 @@ -# HG changeset patch -# User Soren Hansen <soren@linux2go.dk> -# Date 1297678255 -3600 -# Node ID 4c846d555010bb5a91ab4da78dfe596451313742 -# Parent 5b7e9946c79f005c028eb63207cf5eb7bb21d1c3 -Don't attempt to wrap GreenPipes in GreenPipe - -If the os module is monkeypatched, Python's standard subprocess module -will return greenio.GreenPipe instances for Popen objects' stdin, stdout, -and stderr attributes. However, eventlet.green.subprocess tries to wrap -these attributes in another greenio.GreenPipe, which GreenPipe refuses. - -diff -r 5b7e9946c79f -r 4c846d555010 eventlet/green/subprocess.py ---- a/eventlet/green/subprocess.py Sat Feb 05 13:05:05 2011 -0800 -+++ b/eventlet/green/subprocess.py Mon Feb 14 11:10:55 2011 +0100 -@@ -27,7 +27,7 @@ - # eventlet.processes.Process.run() method. 
- for attr in "stdin", "stdout", "stderr": - pipe = getattr(self, attr) -- if pipe is not None: -+ if pipe is not None and not type(pipe) == greenio.GreenPipe: - wrapped_pipe = greenio.GreenPipe(pipe, pipe.mode, bufsize) - setattr(self, attr, wrapped_pipe) - __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__ diff --git a/tools/install_venv.py b/tools/install_venv.py index f4b6583ed..3c2f6979f 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -31,7 +31,6 @@ import sys ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') -TWISTED_NOVA = 'http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) @@ -106,20 +105,12 @@ def install_dependencies(venv=VENV): 'greenlet'], redirect_output=False) run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES], redirect_output=False) - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, - TWISTED_NOVA], redirect_output=False) # Tell the virtual env how to "import nova" pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "nova.pth") f = open(pthfile, 'w') f.write("%s\n" % ROOT) - # Patch eventlet (see FAQ # 1485) - patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch') - patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", - "eventlet", "green", "subprocess.py") - patch_cmd = "patch %s %s" % (patchfile, patchsrc) - os.system(patch_cmd) def print_help(): diff --git a/tools/pip-requires b/tools/pip-requires index dec93c351..23e707034 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -9,7 +9,7 @@ boto==1.9b carrot==0.10.5 eventlet lockfile==0.8 -python-novaclient==2.5.7 +python-novaclient==2.5.9 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 @@ -20,13 +20,13 @@ mox==0.5.3 greenlet==0.3.1 nose bzr -Twisted>=10.1.0 PasteDeploy paste sqlalchemy-migrate netaddr sphinx glance +xattr>=0.6.0 nova-adminclient suds==0.4 coverage |
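[Editor's note, not part of the patch] Taken together, the XenAPI changes above thread the request context from the driver's spawn/snapshot/rescue entry points down to VMHelper.fetch_image and the plugin calls, so that a Glance auth token can be forwarded. Below is an illustrative sketch of how the plugin parameters could be assembled under that scheme; the key names match download_vhd's params dict in the glance plugin hunks, but the helper itself and the assumption that the request context exposes an auth_token attribute are illustrative, not Nova's actual code.

# Illustrative sketch: build the params dict that the glance plugin's
# download_vhd expects, including the new auth_token key (serialization and
# the XenAPI plugin call itself are elided).
def build_download_vhd_params(context, image_id, glance_host, glance_port,
                              uuid_stack, sr_path):
    return {'image_id': image_id,
            'glance_host': glance_host,
            'glance_port': glance_port,
            'uuid_stack': uuid_stack,
            'sr_path': sr_path,
            # Assumes the request context carries the caller's token; this is
            # what ultimately becomes the x-auth-token header sent to Glance.
            'auth_token': getattr(context, 'auth_token', None)}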