| Field | Value | Date |
|---|---|---|
| author | Soren Hansen <soren@linux2go.dk> | 2011-08-09 22:20:53 +0100 |
| committer | Soren Hansen <soren@linux2go.dk> | 2011-08-09 22:20:53 +0100 |
| commit | adc4d2dc71b6dcdad4bca57925f89d7344a613e8 (patch) | |
| tree | fd996b679318d453e1e742b7c4d514e3675348e3 | |
| parent | 1d269ad0b9a8bc7d30ff1f91faa9afe465f87e98 (diff) | |
| parent | d6943d72525fd6a48bc9b3407bc90d9da7f99ad9 (diff) | |
Merge trunk
180 files changed, 7194 insertions(+), 5129 deletions(-)
@@ -64,9 +64,11 @@ Kirill Shileev <kshileev@gmail.com>
 Koji Iida <iida.koji@lab.ntt.co.jp>
 Lorin Hochstein <lorin@isi.edu>
 Lvov Maxim <usrleon@gmail.com>
+Mandell Degerness <mdegerne@gmail.com>
 Mark Washenberger <mark.washenberger@rackspace.com>
 Masanori Itoh <itoumsn@nttdata.co.jp>
 Matt Dietz <matt.dietz@rackspace.com>
+Matthew Hooker <matt@cloudscaling.com>
 Michael Gundlach <michael.gundlach@rackspace.com>
 Mike Scherbakov <mihgen@gmail.com>
 Mohammed Naser <mnaser@vexxhost.com>
@@ -105,3 +107,4 @@ Yoshiaki Tamura <yoshi@midokura.jp>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Yuriy Taraday <yorik.sar@gmail.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>
+Zed Shaw <zedshaw@zedshaw.com>
@@ -5,12 +5,23 @@ Step 1: Read http://www.python.org/dev/peps/pep-0008/
 Step 2: Read http://www.python.org/dev/peps/pep-0008/ again
 Step 3: Read on
+
+General
+-------
+- Put two newlines between top-level code (funcs, classes, etc)
+- Put one newline between methods in classes and anywhere else
+- Do not write "except:", use "except Exception:" at the very least
+- Include your name with TODOs as in "#TODO(termie)"
+- Do not name anything the same name as a built-in or reserved word
+
+
 Imports
 -------
-- thou shalt not import objects, only modules
-- thou shalt not import more than one module per line
-- thou shalt not make relative imports
-- thou shalt organize your imports according to the following template
+- Do not import objects, only modules
+- Do not import more than one module per line
+- Do not make relative imports
+- Order your imports by the full module path
+- Organize your imports according to the following template
 ::
   # vim: tabstop=4 shiftwidth=4 softtabstop=4
   {{begin your code}}
@@ -22,16 +33,6 @@ Imports
-General
--------
-- thou shalt put two newlines twixt toplevel code (funcs, classes, etc)
-- thou shalt put one newline twixt methods in classes and anywhere else
-- thou shalt not write "except:", use "except Exception:" at the very least
-- thou shalt include your name with TODOs as in "TODO(termie)"
-- thou shalt not name anything the same name as a builtin or reserved word
-- thou shalt not violate causality in our time cone, or else
-
-
 Human Alphabetical Order Examples
 ---------------------------------
 ::
@@ -42,11 +43,13 @@ Human Alphabetical Order Examples
   import time
   import unittest
-  from nova import flags
-  from nova import test
+  import nova.api.ec2
+  from nova.api import openstack
   from nova.auth import users
-  from nova.endpoint import api
+  import nova.flags
   from nova.endpoint import cloud
+  from nova import test
+
 
 Docstrings
 ----------
@@ -70,6 +73,88 @@ Docstrings
   :param foo: the foo parameter
   :param bar: the bar parameter
+  :returns: return_type -- description of the return value
   :returns: description of the return value
+  :raises: AttributeError, KeyError
   """
+
+
+Dictionaries/Lists
+------------------
+  If a dictionary (dict) or list object is longer than 80 characters, its
+  items should be split with newlines. Embedded iterables should have their
+  items indented. Additionally, the last item in the dictionary should have
+  a trailing comma. This increases readability and simplifies future diffs.
+
+  Example:
+
+  my_dictionary = {
+      "image": {
+          "name": "Just a Snapshot",
+          "size": 2749573,
+          "properties": {
+              "user_id": 12,
+              "arch": "x86_64",
+          },
+          "things": [
+              "thing_one",
+              "thing_two",
+          ],
+          "status": "ACTIVE",
+      },
+  }
+
+
+Calling Methods
+---------------
+  Calls to methods 80 characters or longer should format each argument with
+  newlines. This is not a requirement, but a guideline.
+
+  unnecessarily_long_function_name('string one',
+                                   'string two',
+                                   kwarg1=constants.ACTIVE,
+                                   kwarg2=['a', 'b', 'c'])
+
+
+  Rather than constructing parameters inline, it is better to break things up:
+
+  list_of_strings = [
+      'what_a_long_string',
+      'not as long',
+  ]
+
+  dict_of_numbers = {
+      'one': 1,
+      'two': 2,
+      'twenty four': 24,
+  }
+
+  object_one.call_a_method('string three',
+                           'string four',
+                           kwarg1=list_of_strings,
+                           kwarg2=dict_of_numbers)
+
+
+Internationalization (i18n) Strings
+-----------------------------------
+  In order to support multiple languages, we have a mechanism to support
+  automatic translations of exception and log strings.
+
+  Example:
+  msg = _("An error occurred")
+  raise HTTPBadRequest(explanation=msg)
+
+  If you have a variable to place within the string, first internationalize
+  the template string then do the replacement.
+
+  Example:
+  msg = _("Missing parameter: %s") % ("flavor",)
+  LOG.error(msg)
+
+  If you have multiple variables to place in the string, use keyword
+  parameters. This helps our translators reorder parameters when needed.
+
+  Example:
+  msg = _("The server with id %(s_id)s has no key %(m_key)s")
+  LOG.error(msg % {"s_id": "1234", "m_key": "imageId"})
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index 21cf68007..2329581a2 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -114,11 +114,11 @@ class AjaxConsoleProxy(object):
                 AjaxConsoleProxy.tokens[kwargs['token']] = \
                     {'args': kwargs, 'last_activity': time.time()}
-        conn = rpc.Connection.instance(new=True)
-        consumer = rpc.TopicAdapterConsumer(
-                        connection=conn,
-                        proxy=TopicProxy,
-                        topic=FLAGS.ajax_console_proxy_topic)
+        conn = rpc.create_connection(new=True)
+        consumer = rpc.create_consumer(
+                        conn,
+                        FLAGS.ajax_console_proxy_topic,
+                        TopicProxy)
         def delete_expired_tokens():
             now = time.time()
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
deleted file mode 100755
index b9d4e49d7..000000000
--- a/bin/nova-instancemonitor
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-    Daemon for Nova RRD based instance resource monitoring.
-"""
-
-import gettext
-import os
-import sys
-from twisted.application import service
-
-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) - -gettext.install('nova', unicode=1) - -from nova import log as logging -from nova import utils -from nova import twistd -from nova.compute import monitor - -LOG = logging.getLogger('nova.instancemonitor') - - -if __name__ == '__main__': - utils.default_flagfile() - twistd.serve(__file__) - -if __name__ == '__builtin__': - LOG.warn(_('Starting instance monitor')) - # pylint: disable=C0103 - monitor = monitor.InstanceMonitor() - - # This is the parent service that twistd will be looking for when it - # parses this file, return it so that we can get it into globals below - application = service.Application('nova-instancemonitor') - monitor.setServiceParent(application) diff --git a/bin/nova-logspool b/bin/nova-logspool index 097459b12..a876f4c71 100644 --- a/bin/nova-logspool +++ b/bin/nova-logspool @@ -81,7 +81,6 @@ class LogReader(object): if level == 'ERROR': self.handle_logged_error(line) elif level == '[-]' and self.last_error: - # twisted stack trace line clean_line = " ".join(line.split(" ")[6:]) self.last_error.trace = self.last_error.trace + clean_line else: diff --git a/bin/nova-manage b/bin/nova-manage index 75d74903c..40f22c19c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -56,6 +56,7 @@ import gettext import glob import json +import math import netaddr import os import sys @@ -591,6 +592,31 @@ class FixedIpCommands(object): fixed_ip['address'], mac_address, hostname, host) + @args('--address', dest="address", metavar='<ip address>', + help='IP address') + def reserve(self, address): + """Mark fixed ip as reserved + arguments: address""" + self._set_reserved(address, True) + + @args('--address', dest="address", metavar='<ip address>', + help='IP address') + def unreserve(self, address): + """Mark fixed ip as free to use + arguments: address""" + self._set_reserved(address, False) + + def _set_reserved(self, address, reserved): + ctxt = context.get_admin_context() + + try: + fixed_ip = db.fixed_ip_get_by_address(ctxt, address) + db.fixed_ip_update(ctxt, fixed_ip['address'], + {'reserved': reserved}) + except exception.NotFound as ex: + print "error: %s" % ex + sys.exit(2) + class FloatingIpCommands(object): """Class for managing floating ip.""" @@ -662,8 +688,9 @@ class NetworkCommands(object): # check for certain required inputs if not label: raise exception.NetworkNotCreated(req='--label') - if not fixed_range_v4: - raise exception.NetworkNotCreated(req='--fixed_range_v4') + if not (fixed_range_v4 or fixed_range_v6): + req = '--fixed_range_v4 or --fixed_range_v6' + raise exception.NetworkNotCreated(req=req) bridge = bridge or FLAGS.flat_network_bridge if not bridge: @@ -689,21 +716,21 @@ class NetworkCommands(object): if FLAGS.network_manager in interface_required: raise exception.NetworkNotCreated(req='--bridge_interface') - if FLAGS.use_ipv6: - fixed_range_v6 = fixed_range_v6 or FLAGS.fixed_range_v6 - if not fixed_range_v6: - raise exception.NetworkNotCreated(req='with use_ipv6, ' - '--fixed_range_v6') - gateway_v6 = gateway_v6 or FLAGS.gateway_v6 - if not gateway_v6: - raise exception.NetworkNotCreated(req='with use_ipv6, ' - '--gateway_v6') - # sanitize other input using FLAGS if necessary if not num_networks: num_networks = FLAGS.num_networks if not network_size: - network_size = FLAGS.network_size + fixnet = netaddr.IPNetwork(fixed_range_v4) + 
each_subnet_size = fixnet.size / int(num_networks) + if each_subnet_size > FLAGS.network_size: + network_size = FLAGS.network_size + subnet = 32 - int(math.log(network_size, 2)) + oversize_msg = _('Subnet(s) too large, defaulting to /%s.' + ' To override, specify network_size flag.' + ) % subnet + print oversize_msg + else: + network_size = fixnet.size if not multi_host: multi_host = FLAGS.multi_host else: @@ -735,8 +762,8 @@ class NetworkCommands(object): def list(self): """List all created networks""" print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % ( - _('network'), - _('netmask'), + _('IPv4'), + _('IPv6'), _('start address'), _('DNS1'), _('DNS2'), @@ -745,7 +772,7 @@ class NetworkCommands(object): for network in db.network_get_all(context.get_admin_context()): print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % ( network.cidr, - network.netmask, + network.cidr_v6, network.dhcp_start, network.dns1, network.dns2, @@ -1233,11 +1260,12 @@ class ImageCommands(object): is_public, architecture) def _lookup(self, old_image_id): + elevated = context.get_admin_context() try: internal_id = ec2utils.ec2_id_to_id(old_image_id) - image = self.image_service.show(context, internal_id) + image = self.image_service.show(elevated, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): - image = self.image_service.show_by_name(context, old_image_id) + image = self.image_service.show_by_name(elevated, old_image_id) return image['id'] def _old_to_new(self, old): diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 1aef3a255..4d5aec445 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -18,7 +18,7 @@ # under the License. """ - Twisted daemon for nova objectstore. Supports S3 API. + Daemon for nova objectstore. Supports S3 API. """ import gettext diff --git a/contrib/nova.sh b/contrib/nova.sh index eab680580..7994e5133 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -75,7 +75,7 @@ if [ "$CMD" == "install" ]; then sudo modprobe kvm sudo /etc/init.d/libvirt-bin restart sudo modprobe nbd - sudo apt-get install -y python-twisted python-mox python-ipy python-paste + sudo apt-get install -y python-mox python-ipy python-paste sudo apt-get install -y python-migrate python-gflags python-greenlet sudo apt-get install -y python-libvirt python-libxml2 python-routes sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst index 329a465db..d99d16eaa 100644 --- a/doc/source/api/autoindex.rst +++ b/doc/source/api/autoindex.rst @@ -26,7 +26,6 @@ nova..compute.api.rst nova..compute.instance_types.rst nova..compute.manager.rst - nova..compute.monitor.rst nova..compute.power_state.rst nova..console.api.rst nova..console.fake.rst @@ -115,13 +114,11 @@ nova..tests.test_scheduler.rst nova..tests.test_service.rst nova..tests.test_test.rst - nova..tests.test_twistd.rst nova..tests.test_utils.rst nova..tests.test_virt.rst nova..tests.test_volume.rst nova..tests.test_xenapi.rst nova..tests.xenapi.stubs.rst - nova..twistd.rst nova..utils.rst nova..version.rst nova..virt.connection.rst diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst deleted file mode 100644 index a91169ecd..000000000 --- a/doc/source/api/nova..compute.monitor.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..compute.monitor` Module -============================================================================== -.. 
automodule:: nova..compute.monitor - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..tests.test_twistd.rst b/doc/source/api/nova..tests.test_twistd.rst deleted file mode 100644 index cae0c0a28..000000000 --- a/doc/source/api/nova..tests.test_twistd.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..tests.test_twistd` Module -============================================================================== -.. automodule:: nova..tests.test_twistd - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst deleted file mode 100644 index d4145396d..000000000 --- a/doc/source/api/nova..twistd.rst +++ /dev/null @@ -1,6 +0,0 @@ -The :mod:`nova..twistd` Module -============================================================================== -.. automodule:: nova..twistd - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/code.rst b/doc/source/code.rst index 6b8d5661f..73fc31e1a 100644 --- a/doc/source/code.rst +++ b/doc/source/code.rst @@ -21,7 +21,6 @@ Generating source/api/nova..cloudpipe.pipelib.rst Generating source/api/nova..compute.disk.rst Generating source/api/nova..compute.instance_types.rst Generating source/api/nova..compute.manager.rst -Generating source/api/nova..compute.monitor.rst Generating source/api/nova..compute.power_state.rst Generating source/api/nova..context.rst Generating source/api/nova..crypto.rst @@ -79,11 +78,9 @@ Generating source/api/nova..tests.rpc_unittest.rst Generating source/api/nova..tests.runtime_flags.rst Generating source/api/nova..tests.scheduler_unittest.rst Generating source/api/nova..tests.service_unittest.rst -Generating source/api/nova..tests.twistd_unittest.rst Generating source/api/nova..tests.validator_unittest.rst Generating source/api/nova..tests.virt_unittest.rst Generating source/api/nova..tests.volume_unittest.rst -Generating source/api/nova..twistd.rst Generating source/api/nova..utils.rst Generating source/api/nova..validate.rst Generating source/api/nova..virt.connection.rst diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst index 233cd6f08..7f44ecdf2 100644 --- a/doc/source/devref/architecture.rst +++ b/doc/source/devref/architecture.rst @@ -45,7 +45,7 @@ Below you will find a helpful explanation of the different components. * Web Dashboard: potential external component that talks to the api * api: component that receives http requests, converts commands and communicates with other components via the queue or http (in the case of objectstore) * Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system. -* objectstore: twisted http server that replicates s3 api and allows storage and retrieval of images +* objectstore: http server that replicates s3 api and allows storage and retrieval of images * scheduler: decides which host gets each vm and volume * volume: manages dynamically attachable block devices. * network: manages ip forwarding, bridges, and vlans diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst index 31cc2037f..50397cbec 100644 --- a/doc/source/devref/compute.rst +++ b/doc/source/devref/compute.rst @@ -118,19 +118,6 @@ The :mod:`nova.virt.fake` Driver :show-inheritance: -Monitoring ----------- - -The :mod:`nova.compute.monitor` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
automodule:: nova.compute.monitor - :noindex: - :members: - :undoc-members: - :show-inheritance: - - Tests ----- diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index f3c454d64..09f1eb2c2 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -51,7 +51,7 @@ To activate the Nova virtualenv for the extent of your current shell session Also, make test will automatically use the virtualenv. -If you don't want to create a virtualenv every time you branch (which takes a while as long as we have the large Twisted project as a dependency) you can reuse a single virtualenv for all branches. +If you don't want to create a virtualenv every time you branch you can reuse a single virtualenv for all branches. #. If you don't have a nova/ directory containing trunk/ and other branches, do so now. #. Go into nova/trunk and install a virtualenv. diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst index 093fbb3ee..beca99ecd 100644 --- a/doc/source/devref/nova.rst +++ b/doc/source/devref/nova.rst @@ -102,16 +102,6 @@ The :mod:`nova.test` Module :show-inheritance: -The :mod:`nova.twistd` Module ------------------------------ - -.. automodule:: nova.twistd - :noindex: - :members: - :undoc-members: - :show-inheritance: - - The :mod:`nova.utils` Module ---------------------------- @@ -215,16 +205,6 @@ The :mod:`runtime_flags` Module :show-inheritance: -The :mod:`twistd_unittest` Module -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. automodule:: nova.tests.twistd_unittest - :noindex: - :members: - :undoc-members: - :show-inheritance: - - The :mod:`validator_unittest` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/nova/api/direct.py b/nova/api/direct.py index ec79151b1..fdd2943d2 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -107,7 +107,8 @@ class DelegatedAuthMiddleware(wsgi.Middleware): def process_request(self, request): os_user = request.headers['X-OpenStack-User'] os_project = request.headers['X-OpenStack-Project'] - context_ref = context.RequestContext(user=os_user, project=os_project) + context_ref = context.RequestContext(user_id=os_user, + project_id=os_project) request.environ['openstack.context'] = context_ref @@ -295,8 +296,8 @@ class ServiceWrapper(object): 'application/json': nova.api.openstack.wsgi.JSONDictSerializer(), }[content_type] return serializer.serialize(result) - except: - raise exception.Error("returned non-serializable type: %s" + except Exception, e: + raise exception.Error(_("Returned non-serializable type: %s") % result) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index cf1734281..8b6e47cfb 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -66,7 +66,7 @@ class RequestLogging(wsgi.Middleware): else: controller = None action = None - ctxt = request.environ.get('ec2.context', None) + ctxt = request.environ.get('nova.context', None) delta = utils.utcnow() - start seconds = delta.seconds microseconds = delta.microseconds @@ -139,8 +139,7 @@ class Lockout(wsgi.Middleware): class Authenticate(wsgi.Middleware): - - """Authenticate an EC2 request and add 'ec2.context' to WSGI environ.""" + """Authenticate an EC2 request and add 'nova.context' to WSGI environ.""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): @@ -148,7 +147,7 @@ class Authenticate(wsgi.Middleware): try: signature = req.params['Signature'] access = req.params['AWSAccessKeyId'] - except: + except KeyError, e: raise 
webob.exc.HTTPBadRequest() # Make a copy of args for authentication and signature verification. @@ -157,8 +156,9 @@ class Authenticate(wsgi.Middleware): auth_params.pop('Signature') # Authenticate the request. + authman = manager.AuthManager() try: - (user, project) = manager.AuthManager().authenticate( + (user, project) = authman.authenticate( access, signature, auth_params, @@ -174,14 +174,17 @@ class Authenticate(wsgi.Middleware): remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctxt = context.RequestContext(user=user, - project=project, + roles = authman.get_active_roles(user, project) + ctxt = context.RequestContext(user_id=user.id, + project_id=project.id, + is_admin=user.is_admin(), + roles=roles, remote_address=remote_address) - req.environ['ec2.context'] = ctxt + req.environ['nova.context'] = ctxt uname = user.name pname = project.name msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals() - LOG.audit(msg, context=req.environ['ec2.context']) + LOG.audit(msg, context=req.environ['nova.context']) return self.application @@ -208,7 +211,7 @@ class Requestify(wsgi.Middleware): for non_arg in non_args: # Remove, but raise KeyError if omitted args.pop(non_arg) - except: + except KeyError, e: raise webob.exc.HTTPBadRequest() LOG.debug(_('action: %s'), action) @@ -228,7 +231,7 @@ class Authorizer(wsgi.Middleware): """Authorize an EC2 API request. Return a 401 if ec2.controller and ec2.action in WSGI environ may not be - executed in ec2.context. + executed in nova.context. """ def __init__(self, application): @@ -282,7 +285,7 @@ class Authorizer(wsgi.Middleware): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - context = req.environ['ec2.context'] + context = req.environ['nova.context'] controller = req.environ['ec2.request'].controller.__class__.__name__ action = req.environ['ec2.request'].action allowed_roles = self.action_roles[controller].get(action, ['none']) @@ -295,28 +298,27 @@ class Authorizer(wsgi.Middleware): def _matches_any_role(self, context, roles): """Return True if any role in roles is allowed in context.""" - if context.user.is_superuser(): + if context.is_admin: return True if 'all' in roles: return True if 'none' in roles: return False - return any(context.project.has_role(context.user_id, role) - for role in roles) + return any(role in context.roles for role in roles) class Executor(wsgi.Application): """Execute an EC2 API request. - Executes 'ec2.action' upon 'ec2.controller', passing 'ec2.context' and + Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and 'ec2.action_args' (all variables in WSGI environ.) Returns an XML response, or a 400 upon failure. 
""" @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - context = req.environ['ec2.context'] + context = req.environ['nova.context'] api_request = req.environ['ec2.request'] result = None try: @@ -352,6 +354,10 @@ class Executor(wsgi.Application): LOG.debug(_('KeyPairExists raised: %s'), unicode(ex), context=context) return self._error(req, context, type(ex).__name__, unicode(ex)) + except exception.InvalidParameterValue as ex: + LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex), + context=context) + return self._error(req, context, type(ex).__name__, unicode(ex)) except Exception as ex: extra = {'environment': req.environ} LOG.exception(_('Unexpected error raised: %s'), unicode(ex), diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 7d78c5cfa..9a3e55925 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -104,7 +104,7 @@ class APIRequest(object): for key in data.keys(): val = data[key] el.appendChild(self._render_data(xml, key, val)) - except: + except Exception: LOG.debug(data) raise diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 0294c09c5..f64a92d12 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -25,11 +25,13 @@ datastore. import base64 import netaddr import os -import urllib +import re +import shutil import tempfile import time -import shutil +import urllib +from nova import block_device from nova import compute from nova import context @@ -78,6 +80,10 @@ def _gen_key(context, user_id, key_name): # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' +_DEFAULT_MAPPINGS = {'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': _DEFAULT_ROOT_DEVICE_NAME, + 'swap': 'sda3'} def _parse_block_device_mapping(bdm): @@ -105,7 +111,7 @@ def _parse_block_device_mapping(bdm): def _properties_get_mappings(properties): - return ec2utils.mappings_prepend_dev(properties.get('mappings', [])) + return block_device.mappings_prepend_dev(properties.get('mappings', [])) def _format_block_device_mapping(bdm): @@ -144,8 +150,7 @@ def _format_mappings(properties, result): """Format multiple BlockDeviceMappingItemType""" mappings = [{'virtualName': m['virtual'], 'deviceName': m['device']} for m in _properties_get_mappings(properties) - if (m['virtual'] == 'swap' or - m['virtual'].startswith('ephemeral'))] + if block_device.is_swap_or_ephemeral(m['virtual'])] block_device_mapping = [_format_block_device_mapping(bdm) for bdm in properties.get('block_device_mapping', [])] @@ -233,6 +238,30 @@ class CloudController(object): state = 'available' return image['properties'].get('image_state', state) + def _format_instance_mapping(self, ctxt, instance_ref): + root_device_name = instance_ref['root_device_name'] + if root_device_name is None: + return _DEFAULT_MAPPINGS + + mappings = {} + mappings['ami'] = block_device.strip_dev(root_device_name) + mappings['root'] = root_device_name + + # 'ephemeralN' and 'swap' + for bdm in db.block_device_mapping_get_all_by_instance( + ctxt, instance_ref['id']): + if (bdm['volume_id'] or bdm['snapshot_id'] or bdm['no_device']): + continue + + virtual_name = bdm['virtual_name'] + if not virtual_name: + continue + + if block_device.is_swap_or_ephemeral(virtual_name): + mappings[virtual_name] = bdm['device_name'] + + return mappings + def get_metadata(self, address): ctxt = context.get_admin_context() instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address) @@ -259,18 +288,14 @@ class CloudController(object): security_groups = 
db.security_group_get_by_instance(ctxt, instance_ref['id']) security_groups = [x['name'] for x in security_groups] + mappings = self._format_instance_mapping(ctxt, instance_ref) data = { - 'user-data': base64.b64decode(instance_ref['user_data']), + 'user-data': self._format_user_data(instance_ref), 'meta-data': { 'ami-id': image_ec2_id, 'ami-launch-index': instance_ref['launch_index'], 'ami-manifest-path': 'FIXME', - 'block-device-mapping': { - # TODO(vish): replace with real data - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': _DEFAULT_ROOT_DEVICE_NAME, - 'swap': 'sda3'}, + 'block-device-mapping': mappings, 'hostname': hostname, 'instance-action': 'none', 'instance-id': ec2_id, @@ -765,6 +790,22 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): + if not re.match('^[a-zA-Z0-9_\- ]+$', str(group_name)): + # Some validation to ensure that values match API spec. + # - Alphanumeric characters, spaces, dashes, and underscores. + # TODO(Daviey): LP: #813685 extend beyond group_name checking, and + # probably create a param validator that can be used elsewhere. + err = _("Value (%s) for parameter GroupName is invalid." + " Content limited to Alphanumeric characters, " + "spaces, dashes, and underscores.") % group_name + # err not that of master ec2 implementation, as they fail to raise. + raise exception.InvalidParameterValue(err=err) + + if len(str(group_name)) > 255: + err = _("Value (%s) for parameter GroupName is invalid." + " Length exceeds maximum of 255.") % group_name + raise exception.InvalidParameterValue(err=err) + LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): @@ -948,13 +989,102 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} - def _convert_to_set(self, lst, label): + @staticmethod + def _convert_to_set(lst, label): if lst is None or lst == []: return None if not isinstance(lst, list): lst = [lst] return [{label: x} for x in lst] + def _format_kernel_id(self, instance_ref, result, key): + kernel_id = instance_ref['kernel_id'] + if kernel_id is None: + return + result[key] = self.image_ec2_id(instance_ref['kernel_id'], 'aki') + + def _format_ramdisk_id(self, instance_ref, result, key): + ramdisk_id = instance_ref['ramdisk_id'] + if ramdisk_id is None: + return + result[key] = self.image_ec2_id(instance_ref['ramdisk_id'], 'ari') + + @staticmethod + def _format_user_data(instance_ref): + return base64.b64decode(instance_ref['user_data']) + + def describe_instance_attribute(self, context, instance_id, attribute, + **kwargs): + def _unsupported_attribute(instance, result): + raise exception.ApiError(_('attribute not supported: %s') % + attribute) + + def _format_attr_block_device_mapping(instance, result): + tmp = {} + self._format_instance_root_device_name(instance, tmp) + self._format_instance_bdm(context, instance_id, + tmp['rootDeviceName'], result) + + def _format_attr_disable_api_termination(instance, result): + _unsupported_attribute(instance, result) + + def _format_attr_group_set(instance, result): + CloudController._format_group_set(instance, result) + + def _format_attr_instance_initiated_shutdown_behavior(instance, + result): + state_description = instance['state_description'] + state_to_value = {'stopping': 'stop', + 'stopped': 'stop', + 'terminating': 'terminate'} + value = 
state_to_value.get(state_description) + if value: + result['instanceInitiatedShutdownBehavior'] = value + + def _format_attr_instance_type(instance, result): + self._format_instance_type(instance, result) + + def _format_attr_kernel(instance, result): + self._format_kernel_id(instance, result, 'kernel') + + def _format_attr_ramdisk(instance, result): + self._format_ramdisk_id(instance, result, 'ramdisk') + + def _format_attr_root_device_name(instance, result): + self._format_instance_root_device_name(instance, result) + + def _format_attr_source_dest_check(instance, result): + _unsupported_attribute(instance, result) + + def _format_attr_user_data(instance, result): + result['userData'] = self._format_user_data(instance) + + attribute_formatter = { + 'blockDeviceMapping': _format_attr_block_device_mapping, + 'disableApiTermination': _format_attr_disable_api_termination, + 'groupSet': _format_attr_group_set, + 'instanceInitiatedShutdownBehavior': + _format_attr_instance_initiated_shutdown_behavior, + 'instanceType': _format_attr_instance_type, + 'kernel': _format_attr_kernel, + 'ramdisk': _format_attr_ramdisk, + 'rootDeviceName': _format_attr_root_device_name, + 'sourceDestCheck': _format_attr_source_dest_check, + 'userData': _format_attr_user_data, + } + + fn = attribute_formatter.get(attribute) + if fn is None: + raise exception.ApiError( + _('attribute not supported: %s') % attribute) + + ec2_instance_id = instance_id + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + instance = self.compute_api.get(context, instance_id) + result = {'instance_id': ec2_instance_id} + fn(instance, result) + return result + def describe_instances(self, context, **kwargs): return self._format_describe_instances(context, **kwargs) @@ -1001,6 +1131,27 @@ class CloudController(object): result['blockDeviceMapping'] = mapping result['rootDeviceType'] = root_device_type + @staticmethod + def _format_instance_root_device_name(instance, result): + result['rootDeviceName'] = (instance.get('root_device_name') or + _DEFAULT_ROOT_DEVICE_NAME) + + @staticmethod + def _format_instance_type(instance, result): + if instance['instance_type']: + result['instanceType'] = instance['instance_type'].get('name') + else: + result['instanceType'] = None + + @staticmethod + def _format_group_set(instance, result): + security_group_names = [] + if instance.get('security_groups'): + for security_group in instance['security_groups']: + security_group_names.append(security_group['name']) + result['groupSet'] = CloudController._convert_to_set( + security_group_names, 'groupId') + def _format_instances(self, context, instance_id=None, **kwargs): # TODO(termie): this method is poorly named as its name does not imply # that it will be making a variety of database calls @@ -1026,6 +1177,8 @@ class CloudController(object): ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id i['imageId'] = self.image_ec2_id(instance['image_ref']) + self._format_kernel_id(instance, i, 'kernelId') + self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} @@ -1054,16 +1207,12 @@ class CloudController(object): instance['project_id'], instance['host']) i['productCodesSet'] = self._convert_to_set([], 'product_codes') - if instance['instance_type']: - i['instanceType'] = instance['instance_type'].get('name') - else: - i['instanceType'] = None + self._format_instance_type(instance, i) i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = 
instance['launch_index'] i['displayName'] = instance['display_name'] i['displayDescription'] = instance['display_description'] - i['rootDeviceName'] = (instance.get('root_device_name') or - _DEFAULT_ROOT_DEVICE_NAME) + self._format_instance_root_device_name(instance, i) self._format_instance_bdm(context, instance_id, i['rootDeviceName'], i) host = instance['host'] @@ -1073,12 +1222,7 @@ class CloudController(object): r = {} r['reservationId'] = instance['reservation_id'] r['ownerId'] = instance['project_id'] - security_group_names = [] - if instance.get('security_groups'): - for security_group in instance['security_groups']: - security_group_names.append(security_group['name']) - r['groupSet'] = self._convert_to_set(security_group_names, - 'groupId') + self._format_group_set(instance, r) r['instancesSet'] = [] reservations[instance['reservation_id']] = r reservations[instance['reservation_id']]['instancesSet'].append(i) @@ -1314,7 +1458,7 @@ class CloudController(object): i['architecture'] = image['properties'].get('architecture') properties = image['properties'] - root_device_name = ec2utils.properties_root_device_name(properties) + root_device_name = block_device.properties_root_device_name(properties) root_device_type = 'instance-store' for bdm in properties.get('block_device_mapping', []): if (bdm.get('device_name') == root_device_name and @@ -1387,7 +1531,7 @@ class CloudController(object): def _root_device_name_attribute(image, result): result['rootDeviceName'] = \ - ec2utils.properties_root_device_name(image['properties']) + block_device.properties_root_device_name(image['properties']) if result['rootDeviceName'] is None: result['rootDeviceName'] = _DEFAULT_ROOT_DEVICE_NAME @@ -1520,8 +1664,7 @@ class CloudController(object): if virtual_name in ('ami', 'root'): continue - assert (virtual_name == 'swap' or - virtual_name.startswith('ephemeral')) + assert block_device.is_swap_or_ephemeral(virtual_name) device_name = m['device'] if device_name in [b['device_name'] for b in mapping if not b.get('no_device', False)]: diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index bae1e0ee5..bcdf2ba78 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -135,32 +135,3 @@ def dict_from_dotted_str(items): args[key] = value return args - - -def properties_root_device_name(properties): - """get root device name from image meta data. - If it isn't specified, return None. 
- """ - root_device_name = None - - # NOTE(yamahata): see image_service.s3.s3create() - for bdm in properties.get('mappings', []): - if bdm['virtual'] == 'root': - root_device_name = bdm['device'] - - # NOTE(yamahata): register_image's command line can override - # <machine>.manifest.xml - if 'root_device_name' in properties: - root_device_name = properties['root_device_name'] - - return root_device_name - - -def mappings_prepend_dev(mappings): - """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type""" - for m in mappings: - virtual = m['virtual'] - if ((virtual == 'swap' or virtual.startswith('ephemeral')) and - (not m['device'].startswith('/'))): - m['device'] = '/dev/' + m['device'] - return mappings diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 868b98a31..4d49df2ad 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -40,6 +40,7 @@ from nova.api.openstack import servers from nova.api.openstack import server_metadata from nova.api.openstack import shared_ip_groups from nova.api.openstack import users +from nova.api.openstack import versions from nova.api.openstack import wsgi from nova.api.openstack import zones @@ -49,6 +50,9 @@ FLAGS = flags.FLAGS flags.DEFINE_bool('allow_admin_api', False, 'When True, this API service will accept admin operations.') +flags.DEFINE_bool('allow_instance_snapshots', + True, + 'When True, this API service will permit instance snapshot operations.') class FaultWrapper(base_wsgi.Middleware): @@ -96,6 +100,7 @@ class APIRouter(base_wsgi.Router): server_members['suspend'] = 'POST' server_members['resume'] = 'POST' server_members['rescue'] = 'POST' + server_members['migrate'] = 'POST' server_members['unrescue'] = 'POST' server_members['reset_network'] = 'POST' server_members['inject_network_info'] = 'POST' @@ -115,6 +120,10 @@ class APIRouter(base_wsgi.Router): 'select': 'POST', 'boot': 'POST'}) + mapper.connect("versions", "/", + controller=versions.create_resource(version), + action='show') + mapper.resource("console", "consoles", controller=consoles.create_resource(), parent_resource=dict(member_name='server', @@ -164,7 +173,9 @@ class APIRouterV11(APIRouter): def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper, '1.1') + image_metadata_controller = image_metadata.create_resource() + mapper.resource("image_meta", "metadata", controller=image_metadata_controller, parent_resource=dict(member_name='image', @@ -175,7 +186,14 @@ class APIRouterV11(APIRouter): action='update_all', conditions={"method": ['PUT']}) - mapper.resource("server_meta", "meta", - controller=server_metadata.create_resource(), + server_metadata_controller = server_metadata.create_resource() + + mapper.resource("server_meta", "metadata", + controller=server_metadata_controller, parent_resource=dict(member_name='server', collection_name='servers')) + + mapper.connect("metadata", "/servers/{server_id}/metadata", + controller=server_metadata_controller, + action='update_all', + conditions={"method": ['PUT']}) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 7c3e683d6..d42abe1f8 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -48,31 +48,35 @@ class AuthMiddleware(wsgi.Middleware): def __call__(self, req): if not self.has_authentication(req): return self.authenticate(req) - user = self.get_user_by_authentication(req) - if not user: + user_id = self.get_user_by_authentication(req) + if not user_id: token = req.headers["X-Auth-Token"] - 
msg = _("%(user)s could not be found with token '%(token)s'") + msg = _("%(user_id)s could not be found with token '%(token)s'") LOG.warn(msg % locals()) return faults.Fault(webob.exc.HTTPUnauthorized()) try: - account = req.headers["X-Auth-Project-Id"] + project_id = req.headers["X-Auth-Project-Id"] except KeyError: # FIXME(usrleon): It needed only for compatibility # while osapi clients don't use this header - accounts = self.auth.get_projects(user=user) - if accounts: - account = accounts[0] + projects = self.auth.get_projects(user_id) + if projects: + project_id = projects[0].id else: return faults.Fault(webob.exc.HTTPUnauthorized()) - if not self.auth.is_admin(user) and \ - not self.auth.is_project_member(user, account): - msg = _("%(user)s must be an admin or a member of %(account)s") + is_admin = self.auth.is_admin(user_id) + req.environ['nova.context'] = context.RequestContext(user_id, + project_id, + is_admin) + if not is_admin and not self.auth.is_project_member(user_id, + project_id): + msg = _("%(user_id)s must be an admin or a " + "member of %(project_id)s") LOG.warn(msg % locals()) return faults.Fault(webob.exc.HTTPUnauthorized()) - req.environ['nova.context'] = context.RequestContext(user, account) return self.application def has_authentication(self, req): @@ -133,7 +137,7 @@ class AuthMiddleware(wsgi.Middleware): if delta.days >= 2: self.db.auth_token_destroy(ctxt, token['token_hash']) else: - return self.auth.get_user(token['user_id']) + return token['user_id'] return None def _authorize_user(self, username, key, req): diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index efa4ab385..ac2104a5f 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -15,8 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. 
+import functools import re -from urlparse import urlparse +import urlparse from xml.dom import minidom import webob @@ -137,8 +138,8 @@ def get_id_from_href(href): if re.match(r'\d+$', str(href)): return int(href) try: - return int(urlparse(href).path.split('/')[-1]) - except: + return int(urlparse.urlsplit(href).path.split('/')[-1]) + except ValueError, e: LOG.debug(_("Error extracting id from href: %s") % href) raise ValueError(_('could not parse id from href')) @@ -153,22 +154,18 @@ def remove_version_from_href(href): Returns: 'http://www.nova.com' """ - try: - #removes the first instance that matches /v#.#/ - new_href = re.sub(r'[/][v][0-9]+\.[0-9]+[/]', '/', href, count=1) + parsed_url = urlparse.urlsplit(href) + new_path = re.sub(r'^/v[0-9]+\.[0-9]+(/|$)', r'\1', parsed_url.path, + count=1) - #if no version was found, try finding /v#.# at the end of the string - if new_href == href: - new_href = re.sub(r'[/][v][0-9]+\.[0-9]+$', '', href, count=1) - except: - LOG.debug(_("Error removing version from href: %s") % href) - msg = _('could not parse version from href') + if new_path == parsed_url.path: + msg = _('href %s does not contain version') % href + LOG.debug(msg) raise ValueError(msg) - if new_href == href: - msg = _('href does not contain version') - raise ValueError(msg) - return new_href + parsed_url = list(parsed_url) + parsed_url[2] = new_path + return urlparse.urlunsplit(parsed_url) def get_version_from_href(href): @@ -196,7 +193,17 @@ def get_version_from_href(href): return version -class MetadataXMLDeserializer(wsgi.MetadataXMLDeserializer): +class MetadataXMLDeserializer(wsgi.XMLDeserializer): + + def extract_metadata(self, metadata_node): + """Marshal the metadata attribute of a parsed request""" + if metadata_node is None: + return {} + metadata = {} + for meta_node in self.find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self.extract_text(meta_node) + return metadata def _extract_metadata_container(self, datastring): dom = minidom.parseString(datastring) @@ -247,7 +254,7 @@ class MetadataXMLSerializer(wsgi.XMLDictSerializer): container_node = self.meta_list_to_xml(xml_doc, items) xml_doc.appendChild(container_node) self._add_xmlns(container_node) - return xml_doc.toprettyxml(indent=' ', encoding='UTF-8') + return xml_doc.toxml('UTF-8') def index(self, metadata_dict): return self._meta_list_to_xml_string(metadata_dict) @@ -264,7 +271,7 @@ class MetadataXMLSerializer(wsgi.XMLDictSerializer): item_node = self._meta_item_to_xml(xml_doc, item_key, item_value) xml_doc.appendChild(item_node) self._add_xmlns(item_node) - return xml_doc.toprettyxml(indent=' ', encoding='UTF-8') + return xml_doc.toxml('UTF-8') def show(self, meta_item_dict): return self._meta_item_to_xml_string(meta_item_dict['meta']) @@ -274,3 +281,15 @@ class MetadataXMLSerializer(wsgi.XMLDictSerializer): def default(self, *args, **kwargs): return '' + + +def check_snapshots_enabled(f): + @functools.wraps(f) + def inner(*args, **kwargs): + if not FLAGS.allow_instance_snapshots: + LOG.warn(_('Rejecting snapshot request, snapshots currently' + ' disabled')) + msg = _("Instance snapshots are not permitted at this time.") + raise webob.exc.HTTPBadRequest(explanation=msg) + return f(*args, **kwargs) + return inner diff --git a/nova/api/openstack/contrib/admin_only.py b/nova/api/openstack/contrib/admin_only.py new file mode 100644 index 000000000..e821c9e1f --- /dev/null +++ b/nova/api/openstack/contrib/admin_only.py @@ -0,0 +1,30 @@ +# Copyright (c) 2011 Openstack, 
LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Decorator for limiting extensions that should be admin-only.""" + +from functools import wraps +from nova import flags +FLAGS = flags.FLAGS + + +def admin_only(fnc): + @wraps(fnc) + def _wrapped(self, *args, **kwargs): + if FLAGS.allow_admin_api: + return fnc(self, *args, **kwargs) + return [] + _wrapped.func_name = fnc.func_name + return _wrapped diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index b4a211857..3d8049324 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -27,9 +27,9 @@ from nova.api.openstack import extensions def _translate_floating_ip_view(floating_ip): result = {'id': floating_ip['id'], 'ip': floating_ip['address']} - if 'fixed_ip' in floating_ip: + try: result['fixed_ip'] = floating_ip['fixed_ip']['address'] - else: + except (TypeError, KeyError): result['fixed_ip'] = None if 'instance' in floating_ip: result['instance_id'] = floating_ip['instance']['id'] diff --git a/nova/api/openstack/contrib/hosts.py b/nova/api/openstack/contrib/hosts.py index 55e57e1a4..ecaa365b7 100644 --- a/nova/api/openstack/contrib/hosts.py +++ b/nova/api/openstack/contrib/hosts.py @@ -24,6 +24,7 @@ from nova import log as logging from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults +from nova.api.openstack.contrib import admin_only from nova.scheduler import api as scheduler_api @@ -70,7 +71,7 @@ class HostController(object): key = raw_key.lower().strip() val = raw_val.lower().strip() # NOTE: (dabo) Right now only 'status' can be set, but other - # actions may follow. + # settings may follow. 
if key == "status": if val[:6] in ("enable", "disabl"): return self._set_enabled_status(req, id, @@ -89,8 +90,30 @@ class HostController(object): LOG.audit(_("Setting host %(host)s to %(state)s.") % locals()) result = self.compute_api.set_host_enabled(context, host=host, enabled=enabled) + if result not in ("enabled", "disabled"): + # An error message was returned + raise webob.exc.HTTPBadRequest(explanation=result) return {"host": host, "status": result} + def _host_power_action(self, req, host, action): + """Reboots, shuts down or powers up the host.""" + context = req.environ['nova.context'] + try: + result = self.compute_api.host_power_action(context, host=host, + action=action) + except NotImplementedError as e: + raise webob.exc.HTTPBadRequest(explanation=e.msg) + return {"host": host, "power_action": result} + + def startup(self, req, id): + return self._host_power_action(req, host=id, action="startup") + + def shutdown(self, req, id): + return self._host_power_action(req, host=id, action="shutdown") + + def reboot(self, req, id): + return self._host_power_action(req, host=id, action="reboot") + class Hosts(extensions.ExtensionDescriptor): def get_name(self): @@ -108,7 +131,10 @@ class Hosts(extensions.ExtensionDescriptor): def get_updated(self): return "2011-06-29T00:00:00+00:00" + @admin_only.admin_only def get_resources(self): - resources = [extensions.ResourceExtension('os-hosts', HostController(), - collection_actions={'update': 'PUT'}, member_actions={})] + resources = [extensions.ResourceExtension('os-hosts', + HostController(), collection_actions={'update': 'PUT'}, + member_actions={"startup": "GET", "shutdown": "GET", + "reboot": "GET"})] return resources diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 1342397c4..894d47beb 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -20,6 +20,7 @@ import webob from webob import exc from xml.dom import minidom +from nova import db from nova import exception from nova import flags from nova import log as logging @@ -28,8 +29,8 @@ from nova import quota from nova import utils from nova.compute import instance_types +from nova.api.openstack import common from nova.api.openstack import wsgi -from nova.auth import manager as auth_manager LOG = logging.getLogger('nova.api.openstack.create_instance_helper') @@ -80,13 +81,21 @@ class CreateInstanceHelper(object): key_name = None key_data = None - key_pairs = auth_manager.AuthManager.get_key_pairs(context) + # TODO(vish): Key pair access should move into a common library + # instead of being accessed directly from the db. + key_pairs = db.key_pair_get_all_by_user(context.elevated(), + context.user_id) if key_pairs: key_pair = key_pairs[0] key_name = key_pair['name'] key_data = key_pair['public_key'] image_href = self.controller._image_ref_from_req_data(body) + # If the image href was generated by nova api, strip image_href + # down to an id and use the default glance connection params + + if str(image_href).startswith(req.application_url): + image_href = image_href.split('/').pop() try: image_service, image_id = nova.image.get_image_service(image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( @@ -285,7 +294,7 @@ class CreateInstanceHelper(object): return password -class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer): +class ServerXMLDeserializer(wsgi.XMLDeserializer): """ Deserializer to handle xml-formatted server create requests. 
@@ -293,6 +302,8 @@ class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer): and personality attributes """ + metadata_deserializer = common.MetadataXMLDeserializer() + def create(self, string): """Deserialize an xml-formatted server create request""" dom = minidom.parseString(string) @@ -304,14 +315,14 @@ class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer): server = {} server_node = self.find_first_child_named(node, 'server') - attributes = ["name", "imageId", "flavorId", "imageRef", - "flavorRef", "adminPass"] + attributes = ["name", "imageId", "flavorId", "adminPass"] for attr in attributes: if server_node.getAttribute(attr): server[attr] = server_node.getAttribute(attr) metadata_node = self.find_first_child_named(server_node, "metadata") - server["metadata"] = self.extract_metadata(metadata_node) + server["metadata"] = self.metadata_deserializer.extract_metadata( + metadata_node) server["personality"] = self._extract_personality(server_node) @@ -329,3 +340,135 @@ class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer): item["contents"] = self.extract_text(file_node) personality.append(item) return personality + + +class ServerXMLDeserializerV11(wsgi.MetadataXMLDeserializer): + """ + Deserializer to handle xml-formatted server create requests. + + Handles standard server attributes as well as optional metadata + and personality attributes + """ + + metadata_deserializer = common.MetadataXMLDeserializer() + + def action(self, string): + dom = minidom.parseString(string) + action_node = dom.childNodes[0] + action_name = action_node.tagName + + action_deserializer = { + 'createImage': self._action_create_image, + 'createBackup': self._action_create_backup, + 'changePassword': self._action_change_password, + 'reboot': self._action_reboot, + 'rebuild': self._action_rebuild, + 'resize': self._action_resize, + 'confirmResize': self._action_confirm_resize, + 'revertResize': self._action_revert_resize, + }.get(action_name, self.default) + + action_data = action_deserializer(action_node) + + return {'body': {action_name: action_data}} + + def _action_create_image(self, node): + return self._deserialize_image_action(node, ('name',)) + + def _action_create_backup(self, node): + attributes = ('name', 'backup_type', 'rotation') + return self._deserialize_image_action(node, attributes) + + def _action_change_password(self, node): + if not node.hasAttribute("adminPass"): + raise AttributeError("No adminPass was specified in request") + return {"adminPass": node.getAttribute("adminPass")} + + def _action_reboot(self, node): + if not node.hasAttribute("type"): + raise AttributeError("No reboot type was specified in request") + return {"type": node.getAttribute("type")} + + def _action_rebuild(self, node): + rebuild = {} + if node.hasAttribute("name"): + rebuild['name'] = node.getAttribute("name") + + metadata_node = self.find_first_child_named(node, "metadata") + if metadata_node is not None: + rebuild["metadata"] = self.extract_metadata(metadata_node) + + personality = self._extract_personality(node) + if personality is not None: + rebuild["personality"] = personality + + if not node.hasAttribute("imageRef"): + raise AttributeError("No imageRef was specified in request") + rebuild["imageRef"] = node.getAttribute("imageRef") + + return rebuild + + def _action_resize(self, node): + if not node.hasAttribute("flavorRef"): + raise AttributeError("No flavorRef was specified in request") + return {"flavorRef": node.getAttribute("flavorRef")} + + def _action_confirm_resize(self, node): + return None + 
+ def _action_revert_resize(self, node): + return None + + def _deserialize_image_action(self, node, allowed_attributes): + data = {} + for attribute in allowed_attributes: + value = node.getAttribute(attribute) + if value: + data[attribute] = value + metadata_node = self.find_first_child_named(node, 'metadata') + if metadata_node is not None: + metadata = self.metadata_deserializer.extract_metadata( + metadata_node) + data['metadata'] = metadata + return data + + def create(self, string): + """Deserialize an xml-formatted server create request""" + dom = minidom.parseString(string) + server = self._extract_server(dom) + return {'body': {'server': server}} + + def _extract_server(self, node): + """Marshal the server attribute of a parsed request""" + server = {} + server_node = self.find_first_child_named(node, 'server') + + attributes = ["name", "imageRef", "flavorRef", "adminPass"] + for attr in attributes: + if server_node.getAttribute(attr): + server[attr] = server_node.getAttribute(attr) + + metadata_node = self.find_first_child_named(server_node, "metadata") + if metadata_node is not None: + server["metadata"] = self.extract_metadata(metadata_node) + + personality = self._extract_personality(server_node) + if personality is not None: + server["personality"] = personality + + return server + + def _extract_personality(self, server_node): + """Marshal the personality attribute of a parsed request""" + node = self.find_first_child_named(server_node, "personality") + if node is not None: + personality = [] + for file_node in self.find_children_named(node, "file"): + item = {} + if file_node.hasAttribute("path"): + item["path"] = file_node.getAttribute("path") + item["contents"] = self.extract_text(file_node) + personality.append(item) + return personality + else: + return None diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index cc889703e..6188e274d 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -23,7 +23,7 @@ import sys import routes import webob.dec import webob.exc -from xml.etree import ElementTree +from lxml import etree from nova import exception from nova import flags @@ -32,6 +32,7 @@ from nova import wsgi as base_wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil LOG = logging.getLogger('extensions') @@ -470,36 +471,38 @@ class ResourceExtension(object): class ExtensionsXMLSerializer(wsgi.XMLDictSerializer): + NSMAP = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM} + def show(self, ext_dict): - ext = self._create_ext_elem(ext_dict['extension']) + ext = etree.Element('extension', nsmap=self.NSMAP) + self._populate_ext(ext, ext_dict['extension']) return self._to_xml(ext) def index(self, exts_dict): - exts = ElementTree.Element('extensions') + exts = etree.Element('extensions', nsmap=self.NSMAP) for ext_dict in exts_dict['extensions']: - exts.append(self._create_ext_elem(ext_dict)) + ext = etree.SubElement(exts, 'extension') + self._populate_ext(ext, ext_dict) return self._to_xml(exts) - def _create_ext_elem(self, ext_dict): - """Create an extension xml element from a dict.""" - ext_elem = ElementTree.Element('extension') + def _populate_ext(self, ext_elem, ext_dict): + """Populate an extension xml element from a dict.""" + ext_elem.set('name', ext_dict['name']) ext_elem.set('namespace', ext_dict['namespace']) ext_elem.set('alias', ext_dict['alias']) ext_elem.set('updated', ext_dict['updated']) - 
desc = ElementTree.Element('description') + desc = etree.Element('description') desc.text = ext_dict['description'] ext_elem.append(desc) for link in ext_dict.get('links', []): - elem = ElementTree.Element('atom:link') + elem = etree.SubElement(ext_elem, '{%s}link' % xmlutil.XMLNS_ATOM) elem.set('rel', link['rel']) elem.set('href', link['href']) elem.set('type', link['type']) - ext_elem.append(elem) return ext_elem def _to_xml(self, root): - """Convert the xml tree object to an xml string.""" - root.set('xmlns', wsgi.XMLNS_V11) - root.set('xmlns:atom', wsgi.XMLNS_ATOM) - return ElementTree.tostring(root, encoding='UTF-8') + """Convert the xml object to an xml string.""" + + return etree.tostring(root, encoding='UTF-8') diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 9ba8b639e..0aabb9e56 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -98,79 +98,39 @@ class Controller(object): self._image_service.delete(context, id) return webob.exc.HTTPNoContent() - def create(self, req, body): - """Snapshot or backup a server instance and save the image. - - Images now have an `image_type` associated with them, which can be - 'snapshot' or the backup type, like 'daily' or 'weekly'. - - If the image_type is backup-like, then the rotation factor can be - included and that will cause the oldest backups that exceed the - rotation factor to be deleted. - - :param req: `wsgi.Request` object - """ - def get_param(param): - try: - return body["image"][param] - except KeyError: - raise webob.exc.HTTPBadRequest(explanation="Missing required " - "param: %s" % param) - - context = req.environ['nova.context'] - content_type = req.get_content_type() - - if not body: - raise webob.exc.HTTPBadRequest() - - image_type = body["image"].get("image_type", "snapshot") - - try: - server_id = self._server_id_from_req(req, body) - except KeyError: - raise webob.exc.HTTPBadRequest() - - image_name = get_param("name") - props = self._get_extra_properties(req, body) - - if image_type == "snapshot": - image = self._compute_service.snapshot( - context, server_id, image_name, - extra_properties=props) - elif image_type == "backup": - # NOTE(sirp): Unlike snapshot, backup is not a customer facing - # API call; rather, it's used by the internal backup scheduler - if not FLAGS.allow_admin_api: - raise webob.exc.HTTPBadRequest( - explanation="Admin API Required") - - backup_type = get_param("backup_type") - rotation = int(get_param("rotation")) - - image = self._compute_service.backup( - context, server_id, image_name, - backup_type, rotation, extra_properties=props) - else: - LOG.error(_("Invalid image_type '%s' passed") % image_type) - raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: " - "%s" % image_type) - - return dict(image=self.get_builder(req).build(image, detail=True)) - def get_builder(self, request): """Indicates that you must use a Controller subclass.""" raise NotImplementedError() - def _server_id_from_req(self, req, data): - raise NotImplementedError() - - def _get_extra_properties(self, req, data): - return {} - class ControllerV10(Controller): """Version 1.0 specific controller logic.""" + @common.check_snapshots_enabled + def create(self, req, body): + """Snapshot a server instance and save the image.""" + try: + image = body["image"] + except (KeyError, TypeError): + msg = _("Invalid image entity") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + image_name = image["name"] + instance_id = image["serverId"] + except KeyError as 
missing_key: + msg = _("Image entity requires %s") % missing_key + raise webob.exc.HTTPBadRequest(explanation=msg) + + context = req.environ["nova.context"] + props = {'instance_id': instance_id} + image = self._compute_service.snapshot(context, + instance_id, + image_name, + extra_properties=props) + + return dict(image=self.get_builder(req).build(image, detail=True)) + def get_builder(self, request): """Property to get the ViewBuilder class we need to use.""" base_url = request.application_url @@ -184,7 +144,7 @@ class ControllerV10(Controller): """ context = req.environ['nova.context'] filters = self._get_filters(req) - images = self._image_service.index(context, filters) + images = self._image_service.index(context, filters=filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -197,18 +157,11 @@ class ControllerV10(Controller): """ context = req.environ['nova.context'] filters = self._get_filters(req) - images = self._image_service.detail(context, filters) + images = self._image_service.detail(context, filters=filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def _server_id_from_req(self, req, data): - try: - return data['image']['serverId'] - except KeyError: - msg = _("Expected serverId attribute on server entity.") - raise webob.exc.HTTPBadRequest(explanation=msg) - class ControllerV11(Controller): """Version 1.1 specific controller logic.""" @@ -246,37 +199,8 @@ class ControllerV11(Controller): builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def _server_id_from_req(self, req, data): - try: - server_ref = data['image']['serverRef'] - except KeyError: - msg = _("Expected serverRef attribute on server entity.") - raise webob.exc.HTTPBadRequest(explanation=msg) - - if not server_ref.startswith('http'): - return server_ref - - passed = urlparse.urlparse(server_ref) - expected = urlparse.urlparse(req.application_url) - version = expected.path.split('/')[1] - expected_prefix = "/%s/servers/" % version - _empty, _sep, server_id = passed.path.partition(expected_prefix) - scheme_ok = passed.scheme == expected.scheme - host_ok = passed.hostname == expected.hostname - port_ok = (passed.port == expected.port or - passed.port == FLAGS.osapi_port) - if not (scheme_ok and port_ok and host_ok and server_id): - msg = _("serverRef must match request url") - raise webob.exc.HTTPBadRequest(explanation=msg) - - return server_id - - def _get_extra_properties(self, req, data): - server_ref = data['image']['serverRef'] - if not server_ref.startswith('http'): - server_ref = os.path.join(req.application_url, 'servers', - server_ref) - return {'instance_ref': server_ref} + def create(self, *args, **kwargs): + raise webob.exc.HTTPMethodNotAllowed() class ImageXMLSerializer(wsgi.XMLDictSerializer): @@ -369,12 +293,6 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer): image_dict['image']) return self.to_xml_string(node, True) - def create(self, image_dict): - xml_doc = minidom.Document() - node = self._image_to_xml_detailed(xml_doc, - image_dict['image']) - return self.to_xml_string(node, True) - def create_resource(version='1.0'): controller = { diff --git a/nova/api/openstack/schemas/atom-link.rng b/nova/api/openstack/schemas/atom-link.rng new file mode 100644 index 000000000..edba5eee6 --- /dev/null +++ 
b/nova/api/openstack/schemas/atom-link.rng @@ -0,0 +1,141 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + -*- rnc -*- + RELAX NG Compact Syntax Grammar for the + Atom Format Specification Version 11 +--> +<grammar xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:s="http://www.ascc.net/xml/schematron" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes"> + <start> + <choice> + <ref name="atomLink"/> + </choice> + </start> + <!-- Common attributes --> + <define name="atomCommonAttributes"> + <optional> + <attribute name="xml:base"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="xml:lang"> + <ref name="atomLanguageTag"/> + </attribute> + </optional> + <zeroOrMore> + <ref name="undefinedAttribute"/> + </zeroOrMore> + </define> + <!-- atom:link --> + <define name="atomLink"> + <element name="atom:link"> + <ref name="atomCommonAttributes"/> + <attribute name="href"> + <ref name="atomUri"/> + </attribute> + <optional> + <attribute name="rel"> + <choice> + <ref name="atomNCName"/> + <ref name="atomUri"/> + </choice> + </attribute> + </optional> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <optional> + <attribute name="hreflang"> + <ref name="atomLanguageTag"/> + </attribute> + </optional> + <optional> + <attribute name="title"/> + </optional> + <optional> + <attribute name="length"/> + </optional> + <ref name="undefinedContent"/> + </element> + </define> + <!-- Low-level simple types --> + <define name="atomNCName"> + <data type="string"> + <param name="minLength">1</param> + <param name="pattern">[^:]*</param> + </data> + </define> + <!-- Whatever a media type is, it contains at least one slash --> + <define name="atomMediaType"> + <data type="string"> + <param name="pattern">.+/.+</param> + </data> + </define> + <!-- As defined in RFC 3066 --> + <define name="atomLanguageTag"> + <data type="string"> + <param name="pattern">[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*</param> + </data> + </define> + <!-- + Unconstrained; it's not entirely clear how IRI fit into + xsd:anyURI so let's not try to constrain it here + --> + <define name="atomUri"> + <text/> + </define> + <!-- Other Extensibility --> + <define name="undefinedAttribute"> + <attribute> + <anyName> + <except> + <name>xml:base</name> + <name>xml:lang</name> + <nsName ns=""/> + </except> + </anyName> + </attribute> + </define> + <define name="undefinedContent"> + <zeroOrMore> + <choice> + <text/> + <ref name="anyForeignElement"/> + </choice> + </zeroOrMore> + </define> + <define name="anyElement"> + <element> + <anyName/> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="anyForeignElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> +</grammar> diff --git a/nova/api/openstack/schemas/atom.rng b/nova/api/openstack/schemas/atom.rng new file mode 100644 index 000000000..c2df4e410 --- /dev/null +++ b/nova/api/openstack/schemas/atom.rng @@ -0,0 +1,597 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + -*- rnc -*- + RELAX NG Compact Syntax Grammar for the + Atom Format Specification Version 11 +--> +<grammar 
xmlns:xhtml="http://www.w3.org/1999/xhtml" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:s="http://www.ascc.net/xml/schematron" xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes"> + <start> + <choice> + <ref name="atomFeed"/> + <ref name="atomEntry"/> + </choice> + </start> + <!-- Common attributes --> + <define name="atomCommonAttributes"> + <optional> + <attribute name="xml:base"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="xml:lang"> + <ref name="atomLanguageTag"/> + </attribute> + </optional> + <zeroOrMore> + <ref name="undefinedAttribute"/> + </zeroOrMore> + </define> + <!-- Text Constructs --> + <define name="atomPlainTextConstruct"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <choice> + <value>text</value> + <value>html</value> + </choice> + </attribute> + </optional> + <text/> + </define> + <define name="atomXHTMLTextConstruct"> + <ref name="atomCommonAttributes"/> + <attribute name="type"> + <value>xhtml</value> + </attribute> + <ref name="xhtmlDiv"/> + </define> + <define name="atomTextConstruct"> + <choice> + <ref name="atomPlainTextConstruct"/> + <ref name="atomXHTMLTextConstruct"/> + </choice> + </define> + <!-- Person Construct --> + <define name="atomPersonConstruct"> + <ref name="atomCommonAttributes"/> + <interleave> + <element name="atom:name"> + <text/> + </element> + <optional> + <element name="atom:uri"> + <ref name="atomUri"/> + </element> + </optional> + <optional> + <element name="atom:email"> + <ref name="atomEmailAddress"/> + </element> + </optional> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + </define> + <!-- Date Construct --> + <define name="atomDateConstruct"> + <ref name="atomCommonAttributes"/> + <data type="dateTime"/> + </define> + <!-- atom:feed --> + <define name="atomFeed"> + <element name="atom:feed"> + <s:rule context="atom:feed"> + <s:assert test="atom:author or not(atom:entry[not(atom:author)])">An atom:feed must have an atom:author unless all of its atom:entry children have an atom:author.</s:assert> + </s:rule> + <ref name="atomCommonAttributes"/> + <interleave> + <zeroOrMore> + <ref name="atomAuthor"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomCategory"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomContributor"/> + </zeroOrMore> + <optional> + <ref name="atomGenerator"/> + </optional> + <optional> + <ref name="atomIcon"/> + </optional> + <ref name="atomId"/> + <zeroOrMore> + <ref name="atomLink"/> + </zeroOrMore> + <optional> + <ref name="atomLogo"/> + </optional> + <optional> + <ref name="atomRights"/> + </optional> + <optional> + <ref name="atomSubtitle"/> + </optional> + <ref name="atomTitle"/> + <ref name="atomUpdated"/> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + <zeroOrMore> + <ref name="atomEntry"/> + </zeroOrMore> + </element> + </define> + <!-- atom:entry --> + <define name="atomEntry"> + <element name="atom:entry"> + <s:rule context="atom:entry"> + <s:assert test="atom:link[@rel='alternate'] or atom:link[not(@rel)] or atom:content">An atom:entry must have at least one atom:link element with a rel attribute of 'alternate' or an atom:content.</s:assert> + </s:rule> + <s:rule context="atom:entry"> + <s:assert test="atom:author or ../atom:author or atom:source/atom:author">An atom:entry must have an atom:author if its feed does not.</s:assert> + </s:rule> + <ref name="atomCommonAttributes"/> + <interleave> + <zeroOrMore> + <ref 
name="atomAuthor"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomCategory"/> + </zeroOrMore> + <optional> + <ref name="atomContent"/> + </optional> + <zeroOrMore> + <ref name="atomContributor"/> + </zeroOrMore> + <ref name="atomId"/> + <zeroOrMore> + <ref name="atomLink"/> + </zeroOrMore> + <optional> + <ref name="atomPublished"/> + </optional> + <optional> + <ref name="atomRights"/> + </optional> + <optional> + <ref name="atomSource"/> + </optional> + <optional> + <ref name="atomSummary"/> + </optional> + <ref name="atomTitle"/> + <ref name="atomUpdated"/> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + </element> + </define> + <!-- atom:content --> + <define name="atomInlineTextContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <choice> + <value>text</value> + <value>html</value> + </choice> + </attribute> + </optional> + <zeroOrMore> + <text/> + </zeroOrMore> + </element> + </define> + <define name="atomInlineXHTMLContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <attribute name="type"> + <value>xhtml</value> + </attribute> + <ref name="xhtmlDiv"/> + </element> + </define> + <define name="atomInlineOtherContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <zeroOrMore> + <choice> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="atomOutOfLineContent"> + <element name="atom:content"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <attribute name="src"> + <ref name="atomUri"/> + </attribute> + <empty/> + </element> + </define> + <define name="atomContent"> + <choice> + <ref name="atomInlineTextContent"/> + <ref name="atomInlineXHTMLContent"/> + <ref name="atomInlineOtherContent"/> + <ref name="atomOutOfLineContent"/> + </choice> + </define> + <!-- atom:author --> + <define name="atomAuthor"> + <element name="atom:author"> + <ref name="atomPersonConstruct"/> + </element> + </define> + <!-- atom:category --> + <define name="atomCategory"> + <element name="atom:category"> + <ref name="atomCommonAttributes"/> + <attribute name="term"/> + <optional> + <attribute name="scheme"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="label"/> + </optional> + <ref name="undefinedContent"/> + </element> + </define> + <!-- atom:contributor --> + <define name="atomContributor"> + <element name="atom:contributor"> + <ref name="atomPersonConstruct"/> + </element> + </define> + <!-- atom:generator --> + <define name="atomGenerator"> + <element name="atom:generator"> + <ref name="atomCommonAttributes"/> + <optional> + <attribute name="uri"> + <ref name="atomUri"/> + </attribute> + </optional> + <optional> + <attribute name="version"/> + </optional> + <text/> + </element> + </define> + <!-- atom:icon --> + <define name="atomIcon"> + <element name="atom:icon"> + <ref name="atomCommonAttributes"/> + <ref name="atomUri"/> + </element> + </define> + <!-- atom:id --> + <define name="atomId"> + <element name="atom:id"> + <ref name="atomCommonAttributes"/> + <ref name="atomUri"/> + </element> + </define> + <!-- atom:logo --> + <define name="atomLogo"> + <element name="atom:logo"> + <ref name="atomCommonAttributes"/> + <ref name="atomUri"/> + </element> + </define> + <!-- atom:link 
--> + <define name="atomLink"> + <element name="atom:link"> + <ref name="atomCommonAttributes"/> + <attribute name="href"> + <ref name="atomUri"/> + </attribute> + <optional> + <attribute name="rel"> + <choice> + <ref name="atomNCName"/> + <ref name="atomUri"/> + </choice> + </attribute> + </optional> + <optional> + <attribute name="type"> + <ref name="atomMediaType"/> + </attribute> + </optional> + <optional> + <attribute name="hreflang"> + <ref name="atomLanguageTag"/> + </attribute> + </optional> + <optional> + <attribute name="title"/> + </optional> + <optional> + <attribute name="length"/> + </optional> + <ref name="undefinedContent"/> + </element> + </define> + <!-- atom:published --> + <define name="atomPublished"> + <element name="atom:published"> + <ref name="atomDateConstruct"/> + </element> + </define> + <!-- atom:rights --> + <define name="atomRights"> + <element name="atom:rights"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:source --> + <define name="atomSource"> + <element name="atom:source"> + <ref name="atomCommonAttributes"/> + <interleave> + <zeroOrMore> + <ref name="atomAuthor"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomCategory"/> + </zeroOrMore> + <zeroOrMore> + <ref name="atomContributor"/> + </zeroOrMore> + <optional> + <ref name="atomGenerator"/> + </optional> + <optional> + <ref name="atomIcon"/> + </optional> + <optional> + <ref name="atomId"/> + </optional> + <zeroOrMore> + <ref name="atomLink"/> + </zeroOrMore> + <optional> + <ref name="atomLogo"/> + </optional> + <optional> + <ref name="atomRights"/> + </optional> + <optional> + <ref name="atomSubtitle"/> + </optional> + <optional> + <ref name="atomTitle"/> + </optional> + <optional> + <ref name="atomUpdated"/> + </optional> + <zeroOrMore> + <ref name="extensionElement"/> + </zeroOrMore> + </interleave> + </element> + </define> + <!-- atom:subtitle --> + <define name="atomSubtitle"> + <element name="atom:subtitle"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:summary --> + <define name="atomSummary"> + <element name="atom:summary"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:title --> + <define name="atomTitle"> + <element name="atom:title"> + <ref name="atomTextConstruct"/> + </element> + </define> + <!-- atom:updated --> + <define name="atomUpdated"> + <element name="atom:updated"> + <ref name="atomDateConstruct"/> + </element> + </define> + <!-- Low-level simple types --> + <define name="atomNCName"> + <data type="string"> + <param name="minLength">1</param> + <param name="pattern">[^:]*</param> + </data> + </define> + <!-- Whatever a media type is, it contains at least one slash --> + <define name="atomMediaType"> + <data type="string"> + <param name="pattern">.+/.+</param> + </data> + </define> + <!-- As defined in RFC 3066 --> + <define name="atomLanguageTag"> + <data type="string"> + <param name="pattern">[A-Za-z]{1,8}(-[A-Za-z0-9]{1,8})*</param> + </data> + </define> + <!-- + Unconstrained; it's not entirely clear how IRI fit into + xsd:anyURI so let's not try to constrain it here + --> + <define name="atomUri"> + <text/> + </define> + <!-- Whatever an email address is, it contains at least one @ --> + <define name="atomEmailAddress"> + <data type="string"> + <param name="pattern">.+@.+</param> + </data> + </define> + <!-- Simple Extension --> + <define name="simpleExtensionElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <text/> + </element> + 
</define> + <!-- Structured Extension --> + <define name="structuredExtensionElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <choice> + <group> + <oneOrMore> + <attribute> + <anyName/> + </attribute> + </oneOrMore> + <zeroOrMore> + <choice> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </group> + <group> + <zeroOrMore> + <attribute> + <anyName/> + </attribute> + </zeroOrMore> + <group> + <optional> + <text/> + </optional> + <oneOrMore> + <ref name="anyElement"/> + </oneOrMore> + <zeroOrMore> + <choice> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </group> + </group> + </choice> + </element> + </define> + <!-- Other Extensibility --> + <define name="extensionElement"> + <choice> + <ref name="simpleExtensionElement"/> + <ref name="structuredExtensionElement"/> + </choice> + </define> + <define name="undefinedAttribute"> + <attribute> + <anyName> + <except> + <name>xml:base</name> + <name>xml:lang</name> + <nsName ns=""/> + </except> + </anyName> + </attribute> + </define> + <define name="undefinedContent"> + <zeroOrMore> + <choice> + <text/> + <ref name="anyForeignElement"/> + </choice> + </zeroOrMore> + </define> + <define name="anyElement"> + <element> + <anyName/> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="anyForeignElement"> + <element> + <anyName> + <except> + <nsName ns="http://www.w3.org/2005/Atom"/> + </except> + </anyName> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyElement"/> + </choice> + </zeroOrMore> + </element> + </define> + <!-- XHTML --> + <define name="anyXHTML"> + <element> + <nsName ns="http://www.w3.org/1999/xhtml"/> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyXHTML"/> + </choice> + </zeroOrMore> + </element> + </define> + <define name="xhtmlDiv"> + <element name="xhtml:div"> + <zeroOrMore> + <choice> + <attribute> + <anyName/> + </attribute> + <text/> + <ref name="anyXHTML"/> + </choice> + </zeroOrMore> + </element> + </define> +</grammar> diff --git a/nova/api/openstack/schemas/v1.1/extension.rng b/nova/api/openstack/schemas/v1.1/extension.rng new file mode 100644 index 000000000..336659755 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/extension.rng @@ -0,0 +1,11 @@ +<element name="extension" ns="http://docs.openstack.org/compute/api/v1.1" + xmlns="http://relaxng.org/ns/structure/1.0"> + <attribute name="alias"> <text/> </attribute> + <attribute name="name"> <text/> </attribute> + <attribute name="namespace"> <text/> </attribute> + <attribute name="updated"> <text/> </attribute> + <element name="description"> <text/> </element> + <zeroOrMore> + <externalRef href="../atom-link.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/schemas/v1.1/extensions.rng b/nova/api/openstack/schemas/v1.1/extensions.rng new file mode 100644 index 000000000..4d8bff646 --- /dev/null +++ b/nova/api/openstack/schemas/v1.1/extensions.rng @@ -0,0 +1,6 @@ +<element name="extensions" xmlns="http://relaxng.org/ns/structure/1.0" + ns="http://docs.openstack.org/compute/api/v1.1"> + <zeroOrMore> + <externalRef href="extension.rng"/> + </zeroOrMore> +</element> diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index d4f42bbf5..b0b014f86 100644 --- a/nova/api/openstack/server_metadata.py +++ 
b/nova/api/openstack/server_metadata.py @@ -18,6 +18,7 @@ from webob import exc from nova import compute +from nova.api.openstack import common from nova.api.openstack import wsgi from nova import exception from nova import quota @@ -31,36 +32,37 @@ class Controller(object): super(Controller, self).__init__() def _get_metadata(self, context, server_id): - metadata = self.compute_api.get_instance_metadata(context, server_id) + try: + meta = self.compute_api.get_instance_metadata(context, server_id) + except exception.InstanceNotFound: + msg = _('Server does not exist') + raise exc.HTTPNotFound(explanation=msg) + meta_dict = {} - for key, value in metadata.iteritems(): + for key, value in meta.iteritems(): meta_dict[key] = value - return dict(metadata=meta_dict) - - def _check_body(self, body): - if body == None or body == "": - expl = _('No Request Body') - raise exc.HTTPBadRequest(explanation=expl) + return meta_dict def index(self, req, server_id): """ Returns the list of metadata for a given instance """ context = req.environ['nova.context'] - try: - return self._get_metadata(context, server_id) - except exception.InstanceNotFound: - msg = _('Server %(server_id)s does not exist') % locals() - raise exc.HTTPNotFound(explanation=msg) + return {'metadata': self._get_metadata(context, server_id)} def create(self, req, server_id, body): - self._check_body(body) + try: + metadata = body['metadata'] + except (KeyError, TypeError): + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + context = req.environ['nova.context'] - metadata = body.get('metadata') + try: self.compute_api.update_or_create_instance_metadata(context, server_id, metadata) except exception.InstanceNotFound: - msg = _('Server %(server_id)s does not exist') % locals() + msg = _('Server does not exist') raise exc.HTTPNotFound(explanation=msg) except quota.QuotaError as error: @@ -69,51 +71,80 @@ class Controller(object): return body def update(self, req, server_id, id, body): - self._check_body(body) - context = req.environ['nova.context'] - if not id in body: + try: + meta_item = body['meta'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + try: + meta_value = meta_item.pop(id) + except (AttributeError, KeyError): expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) - if len(body) > 1: + + if len(meta_item) > 0: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['nova.context'] + self._set_instance_metadata(context, server_id, meta_item) + + return {'meta': {id: meta_value}} + + def update_all(self, req, server_id, body): + try: + metadata = body['metadata'] + except (TypeError, KeyError): + expl = _('Malformed request body') + raise exc.HTTPBadRequest(explanation=expl) + + context = req.environ['nova.context'] + self._set_instance_metadata(context, server_id, metadata) + + return {'metadata': metadata} + + def _set_instance_metadata(self, context, server_id, metadata): try: self.compute_api.update_or_create_instance_metadata(context, server_id, - body) + metadata) except exception.InstanceNotFound: - msg = _('Server %(server_id)s does not exist') % locals() + msg = _('Server does not exist') raise exc.HTTPNotFound(explanation=msg) + except ValueError: + msg = _("Malformed request body") + raise exc.HTTPBadRequest(explanation=msg) + except quota.QuotaError as error: self._handle_quota_error(error) - return body - def show(self, req, 
server_id, id): """ Return a single metadata item """ context = req.environ['nova.context'] - try: - data = self._get_metadata(context, server_id) - except exception.InstanceNotFound: - msg = _('Server %(server_id)s does not exist') % locals() - raise exc.HTTPNotFound(explanation=msg) + data = self._get_metadata(context, server_id) try: - return {id: data['metadata'][id]} + return {'meta': {id: data[id]}} except KeyError: - msg = _("metadata item %s was not found" % (id)) + msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) def delete(self, req, server_id, id): """ Deletes an existing metadata """ context = req.environ['nova.context'] + + metadata = self._get_metadata(context, server_id) + try: - self.compute_api.delete_instance_metadata(context, server_id, id) - except exception.InstanceNotFound: - msg = _('Server %(server_id)s does not exist') % locals() + meta_key = metadata[id] + except KeyError: + msg = _("Metadata item was not found") raise exc.HTTPNotFound(explanation=msg) + self.compute_api.delete_instance_metadata(context, server_id, meta_key) + def _handle_quota_error(self, error): """Reraise quota errors as api-specific http exceptions.""" if error.code == "MetadataLimitExceeded": @@ -122,10 +153,16 @@ class Controller(object): def create_resource(): - body_serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), + headers_serializer = common.MetadataHeadersSerializer() + + body_deserializers = { + 'application/xml': common.MetadataXMLDeserializer(), } - serializer = wsgi.ResponseSerializer(body_serializers) + body_serializers = { + 'application/xml': common.MetadataXMLSerializer(), + } + serializer = wsgi.ResponseSerializer(body_serializers, headers_serializer) + deserializer = wsgi.RequestDeserializer(body_deserializers) - return wsgi.Resource(Controller(), serializer=serializer) + return wsgi.Resource(Controller(), deserializer, serializer) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f6841318d..f1a27a98c 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -14,14 +14,14 @@ # under the License. 
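# --- Editor's aside: illustrative sketch only, not part of this commit's diff.
# The reworked server_metadata.Controller above expects these request body
# shapes (keys and values below are assumed examples):

example_update_body = {"meta": {"key1": "value1"}}          # PUT .../metadata/key1
example_update_all_body = {"metadata": {"key1": "value1",
                                        "key2": "value2"}}  # PUT .../metadata

# index() now returns {'metadata': {...}} for the whole mapping, while show()
# and update() return a single item wrapped as {'meta': {id: value}}.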
import base64 +import os import traceback from webob import exc -import webob from xml.dom import minidom +import webob from nova import compute -from nova import db from nova import exception from nova import flags from nova import log as logging @@ -29,13 +29,14 @@ from nova import utils from nova.api.openstack import common from nova.api.openstack import create_instance_helper as helper from nova.api.openstack import ips +from nova.api.openstack import wsgi +from nova.compute import instance_types +from nova.scheduler import api as scheduler_api +import nova.api.openstack import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images import nova.api.openstack.views.servers -from nova.api.openstack import wsgi -import nova.api.openstack -from nova.scheduler import api as scheduler_api LOG = logging.getLogger('nova.api.openstack.servers') @@ -154,23 +155,95 @@ class Controller(object): @scheduler_api.redirect_handler def action(self, req, id, body): - """Multi-purpose method used to reboot, rebuild, or - resize a server""" + """Multi-purpose method used to take actions on a server""" - actions = { + self.actions = { 'changePassword': self._action_change_password, 'reboot': self._action_reboot, 'resize': self._action_resize, 'confirmResize': self._action_confirm_resize, 'revertResize': self._action_revert_resize, 'rebuild': self._action_rebuild, - 'migrate': self._action_migrate} + 'createImage': self._action_create_image, + } - for key in actions.keys(): + if FLAGS.allow_admin_api: + admin_actions = { + 'createBackup': self._action_create_backup, + } + self.actions.update(admin_actions) + + for key in self.actions.keys(): if key in body: - return actions[key](body, req, id) + return self.actions[key](body, req, id) + raise exc.HTTPNotImplemented() + def _action_create_backup(self, input_dict, req, instance_id): + """Backup a server instance. + + Images now have an `image_type` associated with them, which can be + 'snapshot' or the backup type, like 'daily' or 'weekly'. + + If the image_type is backup-like, then the rotation factor can be + included and that will cause the oldest backups that exceed the + rotation factor to be deleted. 
+ + """ + entity = input_dict["createBackup"] + + try: + image_name = entity["name"] + backup_type = entity["backup_type"] + rotation = entity["rotation"] + + except KeyError as missing_key: + msg = _("createBackup entity requires %s attribute") % missing_key + raise webob.exc.HTTPBadRequest(explanation=msg) + + except TypeError: + msg = _("Malformed createBackup entity") + raise webob.exc.HTTPBadRequest(explanation=msg) + + try: + rotation = int(rotation) + except ValueError: + msg = _("createBackup attribute 'rotation' must be an integer") + raise webob.exc.HTTPBadRequest(explanation=msg) + + # preserve link to server in image properties + server_ref = os.path.join(req.application_url, + 'servers', + str(instance_id)) + props = {'instance_ref': server_ref} + + metadata = entity.get('metadata', {}) + try: + props.update(metadata) + except ValueError: + msg = _("Invalid metadata") + raise webob.exc.HTTPBadRequest(explanation=msg) + + context = req.environ["nova.context"] + image = self.compute_api.backup(context, + instance_id, + image_name, + backup_type, + rotation, + extra_properties=props) + + # build location of newly-created image entity + image_id = str(image['id']) + image_ref = os.path.join(req.application_url, 'images', image_id) + + resp = webob.Response(status_int=202) + resp.headers['Location'] = image_ref + return resp + + @common.check_snapshots_enabled + def _action_create_image(self, input_dict, req, id): + return exc.HTTPNotImplemented() + def _action_change_password(self, input_dict, req, id): return exc.HTTPNotImplemented() @@ -195,10 +268,16 @@ class Controller(object): def _action_reboot(self, input_dict, req, id): if 'reboot' in input_dict and 'type' in input_dict['reboot']: - reboot_type = input_dict['reboot']['type'] + valid_reboot_types = ['HARD', 'SOFT'] + reboot_type = input_dict['reboot']['type'].upper() + if not valid_reboot_types.count(reboot_type): + msg = _("Argument 'type' for reboot is not HARD or SOFT") + LOG.exception(msg) + raise exc.HTTPBadRequest(explanation=msg) else: - LOG.exception(_("Missing argument 'type' for reboot")) - raise exc.HTTPUnprocessableEntity() + msg = _("Missing argument 'type' for reboot") + LOG.exception(msg) + raise exc.HTTPBadRequest(explanation=msg) try: # TODO(gundlach): pass reboot_type, support soft reboot in # virt driver @@ -208,14 +287,6 @@ class Controller(object): raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) - def _action_migrate(self, input_dict, req, id): - try: - self.compute_api.resize(req.environ['nova.context'], id) - except Exception, e: - LOG.exception(_("Error in migrate %s"), e) - raise exc.HTTPBadRequest() - return webob.Response(status_int=202) - @scheduler_api.redirect_handler def lock(self, req, id): """ @@ -226,7 +297,7 @@ class Controller(object): context = req.environ['nova.context'] try: self.compute_api.lock(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::lock %s"), readable) raise exc.HTTPUnprocessableEntity() @@ -242,7 +313,7 @@ class Controller(object): context = req.environ['nova.context'] try: self.compute_api.unlock(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::unlock %s"), readable) raise exc.HTTPUnprocessableEntity() @@ -257,14 +328,14 @@ class Controller(object): context = req.environ['nova.context'] try: self.compute_api.get_lock(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::get_lock 
%s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @scheduler_api.redirect_handler - def reset_network(self, req, id, body): + def reset_network(self, req, id): """ Reset networking on an instance (admin only). @@ -272,14 +343,14 @@ class Controller(object): context = req.environ['nova.context'] try: self.compute_api.reset_network(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::reset_network %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @scheduler_api.redirect_handler - def inject_network_info(self, req, id, body): + def inject_network_info(self, req, id): """ Inject network info for an instance (admin only). @@ -287,67 +358,76 @@ class Controller(object): context = req.environ['nova.context'] try: self.compute_api.inject_network_info(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::inject_network_info %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @scheduler_api.redirect_handler - def pause(self, req, id, body): + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] try: self.compute_api.pause(ctxt, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::pause %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @scheduler_api.redirect_handler - def unpause(self, req, id, body): + def unpause(self, req, id): """ Permit Admins to Unpause the server. """ ctxt = req.environ['nova.context'] try: self.compute_api.unpause(ctxt, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("Compute.api::unpause %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @scheduler_api.redirect_handler - def suspend(self, req, id, body): + def suspend(self, req, id): """permit admins to suspend the server""" context = req.environ['nova.context'] try: self.compute_api.suspend(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("compute.api::suspend %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @scheduler_api.redirect_handler - def resume(self, req, id, body): + def resume(self, req, id): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] try: self.compute_api.resume(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("compute.api::resume %s"), readable) raise exc.HTTPUnprocessableEntity() return webob.Response(status_int=202) @scheduler_api.redirect_handler + def migrate(self, req, id): + try: + self.compute_api.resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in migrate %s"), e) + raise exc.HTTPBadRequest() + return webob.Response(status_int=202) + + @scheduler_api.redirect_handler def rescue(self, req, id): """Permit users to rescue the server.""" context = req.environ["nova.context"] try: self.compute_api.rescue(context, id) - except: + except Exception: readable = traceback.format_exc() LOG.exception(_("compute.api::rescue %s"), readable) raise exc.HTTPUnprocessableEntity() @@ -359,7 +439,7 @@ class Controller(object): context = req.environ["nova.context"] try: self.compute_api.unrescue(context, id) - except: + except Exception: readable = traceback.format_exc() 
LOG.exception(_("compute.api::unrescue %s"), readable) raise exc.HTTPUnprocessableEntity() @@ -405,6 +485,24 @@ class Controller(object): error=item.error)) return dict(actions=actions) + def resize(self, req, instance_id, flavor_id): + """Begin the resize process with given instance/flavor.""" + context = req.environ["nova.context"] + + try: + self.compute_api.resize(context, instance_id, flavor_id) + except exception.FlavorNotFound: + msg = _("Unable to locate requested flavor.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.CannotResizeToSameSize: + msg = _("Resize requires a change in size.") + raise exc.HTTPBadRequest(explanation=msg) + except exception.CannotResizeToSmallerSize: + msg = _("Resizing to a smaller size is not supported.") + raise exc.HTTPBadRequest(explanation=msg) + + return webob.Response(status_int=202) + class ControllerV10(Controller): @@ -438,14 +536,13 @@ class ControllerV10(Controller): def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ - if 'resize' in input_dict and 'flavorId' in input_dict['resize']: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing 'flavorId' argument for resize")) - raise exc.HTTPUnprocessableEntity() - return webob.Response(status_int=202) + try: + flavor_id = input_dict["resize"]["flavorId"] + except (KeyError, TypeError): + msg = _("Resize requests require 'flavorId' attribute.") + raise exc.HTTPBadRequest(explanation=msg) + + return self.resize(req, id, flavor_id) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -555,18 +652,15 @@ class ControllerV11(Controller): def _action_resize(self, input_dict, req, id): """ Resizes a given instance to the flavor size requested """ try: - if 'resize' in input_dict and 'flavorRef' in input_dict['resize']: - flavor_ref = input_dict['resize']['flavorRef'] - flavor_id = common.get_id_from_href(flavor_ref) - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing 'flavorRef' argument for resize")) - raise exc.HTTPUnprocessableEntity() - except Exception, e: - LOG.exception(_("Error in resize %s"), e) - raise exc.HTTPBadRequest() - return webob.Response(status_int=202) + flavor_ref = input_dict["resize"]["flavorRef"] + if not flavor_ref: + msg = _("Resize request has invalid 'flavorRef' attribute.") + raise exc.HTTPBadRequest(explanation=msg) + except (KeyError, TypeError): + msg = _("Resize requests require 'flavorRef' attribute.") + raise exc.HTTPBadRequest(explanation=msg) + + return self.resize(req, id, flavor_ref) def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] @@ -596,6 +690,49 @@ class ControllerV11(Controller): return webob.Response(status_int=202) + @common.check_snapshots_enabled + def _action_create_image(self, input_dict, req, instance_id): + """Snapshot a server instance.""" + entity = input_dict.get("createImage", {}) + + try: + image_name = entity["name"] + + except KeyError: + msg = _("createImage entity requires name attribute") + raise webob.exc.HTTPBadRequest(explanation=msg) + + except TypeError: + msg = _("Malformed createImage entity") + raise webob.exc.HTTPBadRequest(explanation=msg) + + # preserve link to server in image properties + server_ref = os.path.join(req.application_url, + 'servers', + str(instance_id)) + props = {'instance_ref': server_ref} + + 
metadata = entity.get('metadata', {}) + try: + props.update(metadata) + except ValueError: + msg = _("Invalid metadata") + raise webob.exc.HTTPBadRequest(explanation=msg) + + context = req.environ['nova.context'] + image = self.compute_api.snapshot(context, + instance_id, + image_name, + extra_properties=props) + + # build location of newly-created image entity + image_id = str(image['id']) + image_ref = os.path.join(req.application_url, 'images', image_id) + + resp = webob.Response(status_int=202) + resp.headers['Location'] = image_ref + return resp + def get_default_xmlns(self, req): return common.XML_NS_V11 @@ -765,8 +902,13 @@ def create_resource(version='1.0'): 'application/xml': xml_serializer, } + xml_deserializer = { + '1.0': helper.ServerXMLDeserializer(), + '1.1': helper.ServerXMLDeserializerV11(), + }[version] + body_deserializers = { - 'application/xml': helper.ServerXMLDeserializer(), + 'application/xml': xml_deserializer, } serializer = wsgi.ResponseSerializer(body_serializers, headers_serializer) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index df7a94b7e..e2f892fb6 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -24,7 +24,66 @@ import nova.api.openstack.views.versions from nova.api.openstack import wsgi -ATOM_XMLNS = "http://www.w3.org/2005/Atom" +VERSIONS = { + "v1.0": { + "id": "v1.0", + "status": "DEPRECATED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json", + } + ], + }, + "v1.1": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json", + } + ], + }, +} class Versions(wsgi.Resource): @@ -36,16 +95,20 @@ class Versions(wsgi.Resource): } } + headers_serializer = VersionsHeadersSerializer() + body_serializers = { 'application/atom+xml': VersionsAtomSerializer(metadata=metadata), 'application/xml': VersionsXMLSerializer(metadata=metadata), } - serializer = wsgi.ResponseSerializer(body_serializers) + serializer = wsgi.ResponseSerializer( + body_serializers=body_serializers, + headers_serializer=headers_serializer) supported_content_types = ('application/json', 'application/xml', 'application/atom+xml') - deserializer = wsgi.RequestDeserializer( + deserializer = VersionsRequestDeserializer( supported_content_types=supported_content_types) wsgi.Resource.__init__(self, None, serializer=serializer, @@ -53,60 +116,131 @@ class Versions(wsgi.Resource): def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" - 
version_objs = [ - { - "id": "v1.1", - "status": "CURRENT", - #TODO(wwolf) get correct value for these - "updated": "2011-07-18T11:30:00Z", - }, - { - "id": "v1.0", - "status": "DEPRECATED", - #TODO(wwolf) get correct value for these - "updated": "2010-10-09T11:30:00Z", - }, - ] - builder = nova.api.openstack.views.versions.get_view_builder(request) - versions = [builder.build(version) for version in version_objs] - return dict(versions=versions) + if request.path == '/': + # List Versions + return builder.build_versions(VERSIONS) + else: + # Versions Multiple Choice + return builder.build_choices(VERSIONS, request) + + +class VersionV10(object): + def show(self, req): + builder = nova.api.openstack.views.versions.get_view_builder(req) + return builder.build_version(VERSIONS['v1.0']) + + +class VersionV11(object): + def show(self, req): + builder = nova.api.openstack.views.versions.get_view_builder(req) + return builder.build_version(VERSIONS['v1.1']) + + +class VersionsRequestDeserializer(wsgi.RequestDeserializer): + def get_expected_content_type(self, request): + supported_content_types = list(self.supported_content_types) + if request.path != '/': + # Remove atom+xml accept type for 300 responses + if 'application/atom+xml' in supported_content_types: + supported_content_types.remove('application/atom+xml') + + return request.best_match_content_type(supported_content_types) + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + args = {} + if request_environment['PATH_INFO'] == '/': + args['action'] = 'index' + else: + args['action'] = 'multi' + + return args class VersionsXMLSerializer(wsgi.XMLDictSerializer): - def _versions_to_xml(self, versions): - root = self._xml_doc.createElement('versions') + #TODO(wwolf): this is temporary until we get rid of toprettyxml + # in the base class (XMLDictSerializer), which I plan to do in + # another branch + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml(encoding='UTF-8') + + def _versions_to_xml(self, versions, name="versions", xmlns=None): + root = self._xml_doc.createElement(name) + root.setAttribute("xmlns", wsgi.XMLNS_V11) + root.setAttribute("xmlns:atom", wsgi.XMLNS_ATOM) for version in versions: root.appendChild(self._create_version_node(version)) return root - def _create_version_node(self, version): + def _create_media_types(self, media_types): + base = self._xml_doc.createElement('media-types') + for type in media_types: + node = self._xml_doc.createElement('media-type') + node.setAttribute('base', type['base']) + node.setAttribute('type', type['type']) + base.appendChild(node) + + return base + + def _create_version_node(self, version, create_ns=False): version_node = self._xml_doc.createElement('version') + if create_ns: + xmlns = wsgi.XMLNS_V11 + xmlns_atom = wsgi.XMLNS_ATOM + version_node.setAttribute('xmlns', xmlns) + version_node.setAttribute('xmlns:atom', xmlns_atom) + version_node.setAttribute('id', version['id']) version_node.setAttribute('status', version['status']) - version_node.setAttribute('updated', version['updated']) + if 'updated' in version: + version_node.setAttribute('updated', version['updated']) + + if 'media-types' in version: + media_types = self._create_media_types(version['media-types']) + version_node.appendChild(media_types) - for link in version['links']: - link_node = self._xml_doc.createElement('atom:link') - link_node.setAttribute('rel', link['rel']) - link_node.setAttribute('href', link['href']) - 
version_node.appendChild(link_node) + link_nodes = self._create_link_nodes(self._xml_doc, version['links']) + for link in link_nodes: + version_node.appendChild(link) return version_node - def default(self, data): + def index(self, data): self._xml_doc = minidom.Document() node = self._versions_to_xml(data['versions']) return self.to_xml_string(node) + def show(self, data): + self._xml_doc = minidom.Document() + node = self._create_version_node(data['version'], True) + + return self.to_xml_string(node) + + def multi(self, data): + self._xml_doc = minidom.Document() + node = self._versions_to_xml(data['choices'], 'choices', + xmlns=wsgi.XMLNS_V11) + + return self.to_xml_string(node) + class VersionsAtomSerializer(wsgi.XMLDictSerializer): + #TODO(wwolf): this is temporary until we get rid of toprettyxml + # in the base class (XMLDictSerializer), which I plan to do in + # another branch + def to_xml_string(self, node, has_atom=False): + self._add_xmlns(node, has_atom) + return node.toxml(encoding='UTF-8') + def __init__(self, metadata=None, xmlns=None): + self.metadata = metadata or {} if not xmlns: - self.xmlns = ATOM_XMLNS + self.xmlns = wsgi.XMLNS_ATOM else: self.xmlns = xmlns @@ -135,8 +269,33 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): link_href = link_href.rstrip('/') return link_href.rsplit('/', 1)[0] + '/' - def _create_meta(self, root, versions): - title = self._create_text_elem('title', 'Available API Versions', + def _create_detail_meta(self, root, version): + title = self._create_text_elem('title', "About This Version", + type='text') + + updated = self._create_text_elem('updated', version['updated']) + + uri = version['links'][0]['href'] + id = self._create_text_elem('id', uri) + + link = self._xml_doc.createElement('link') + link.setAttribute('rel', 'self') + link.setAttribute('href', uri) + + author = self._xml_doc.createElement('author') + author_name = self._create_text_elem('name', 'Rackspace') + author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/') + author.appendChild(author_name) + author.appendChild(author_uri) + + root.appendChild(title) + root.appendChild(updated) + root.appendChild(id) + root.appendChild(author) + root.appendChild(link) + + def _create_list_meta(self, root, versions): + title = self._create_text_elem('title', "Available API Versions", type='text') # Set this updated to the most recently updated version recent = self._get_most_recent_update(versions) @@ -144,6 +303,7 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): base_url = self._get_base_url(versions[0]['links'][0]['href']) id = self._create_text_elem('id', base_url) + link = self._xml_doc.createElement('link') link.setAttribute('rel', 'self') link.setAttribute('href', base_url) @@ -178,7 +338,10 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): link_node = self._xml_doc.createElement('link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) - entry.appendChild(link_node) + if 'type' in link: + link_node.setAttribute('type', link['type']) + + entry.appendChild(link_node) content = self._create_text_elem('content', 'Version %s %s (%s)' % @@ -190,10 +353,45 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer): entry.appendChild(content) root.appendChild(entry) - def default(self, data): + def index(self, data): self._xml_doc = minidom.Document() node = self._xml_doc.createElementNS(self.xmlns, 'feed') - self._create_meta(node, data['versions']) + self._create_list_meta(node, data['versions']) 
self._create_version_entries(node, data['versions']) return self.to_xml_string(node) + + def show(self, data): + self._xml_doc = minidom.Document() + node = self._xml_doc.createElementNS(self.xmlns, 'feed') + self._create_detail_meta(node, data['version']) + self._create_version_entries(node, [data['version']]) + + return self.to_xml_string(node) + + +class VersionsHeadersSerializer(wsgi.ResponseHeadersSerializer): + def multi(self, response, data): + response.status_int = 300 + + +def create_resource(version='1.0'): + controller = { + '1.0': VersionV10, + '1.1': VersionV11, + }[version]() + + body_serializers = { + 'application/xml': VersionsXMLSerializer(), + 'application/atom+xml': VersionsAtomSerializer(), + } + serializer = wsgi.ResponseSerializer(body_serializers) + + supported_content_types = ('application/json', + 'application/xml', + 'application/atom+xml') + deserializer = wsgi.RequestDeserializer( + supported_content_types=supported_content_types) + + return wsgi.Resource(controller, serializer=serializer, + deserializer=deserializer) diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index 873ce212a..912303d14 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -77,7 +77,9 @@ class ViewBuilder(object): "status": image_obj.get("status"), }) - if image["status"] == "SAVING": + if image["status"].upper() == "ACTIVE": + image["progress"] = 100 + else: image["progress"] = 0 return image diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 659a43522..2873a8e0f 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -150,10 +150,8 @@ class ViewBuilderV11(ViewBuilder): def _build_detail(self, inst): response = super(ViewBuilderV11, self)._build_detail(inst) - response['server']['created'] = \ - self._convert_timeformat(inst['created_at']) - response['server']['updated'] = \ - self._convert_timeformat(inst['updated_at']) + response['server']['created'] = utils.isotime(inst['created_at']) + response['server']['updated'] = utils.isotime(inst['updated_at']) if 'status' in response['server']: if response['server']['status'] == "ACTIVE": response['server']['progress'] = 100 @@ -224,11 +222,3 @@ class ViewBuilderV11(ViewBuilder): """Create an url that refers to a specific flavor id.""" return os.path.join(common.remove_version_from_href(self.base_url), "servers", str(server_id)) - - def _convert_timeformat(self, date_time): - """Converts the given time into the common time format - - :param date_time: the datetime object to convert - - """ - return date_time.strftime(utils.TIME_FORMAT) diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py index 9fa8f49dc..03da80818 100644 --- a/nova/api/openstack/views/versions.py +++ b/nova/api/openstack/views/versions.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
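# --- Editor's aside: illustrative sketch only, not part of this commit's diff.
# The versions resource above routes '/' to the version list and any other
# path to the "multiple choices" response, which VersionsHeadersSerializer
# then forces to HTTP 300. A stripped-down mirror of that routing decision:

def choose_versions_action(path_info):
    # Mirrors VersionsRequestDeserializer.get_action_args() above.
    return 'index' if path_info == '/' else 'multi'

# choose_versions_action('/')        => 'index'  (200, list of versions)
# choose_versions_action('/servers') => 'multi'  (300, version choices)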
+import copy import os @@ -31,16 +32,44 @@ class ViewBuilder(object): """ self.base_url = base_url - def build(self, version_data): - """Generic method used to generate a version entity.""" - version = { - "id": version_data["id"], - "status": version_data["status"], - "updated": version_data["updated"], - "links": self._build_links(version_data), - } + def build_choices(self, VERSIONS, req): + version_objs = [] + for version in VERSIONS: + version = VERSIONS[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "links": [ + { + "rel": "self", + "href": self.generate_href(version['id'], req.path), + }, + ], + "media-types": version['media-types'], + }) - return version + return dict(choices=version_objs) + + def build_versions(self, versions): + version_objs = [] + for version in versions: + version = versions[version] + version_objs.append({ + "id": version['id'], + "status": version['status'], + "updated": version['updated'], + "links": self._build_links(version), + }) + + return dict(versions=version_objs) + + def build_version(self, version): + reval = copy.deepcopy(version) + reval['links'].insert(0, { + "rel": "self", + "href": self.base_url.rstrip('/') + '/', + }) + return dict(version=reval) def _build_links(self, version_data): """Generate a container of links that refer to the provided version.""" @@ -55,6 +84,11 @@ class ViewBuilder(object): return links - def generate_href(self, version_number): + def generate_href(self, version_number, path=None): """Create an url that refers to a specific version_number.""" - return os.path.join(self.base_url, version_number) + '/' + version_number = version_number.strip('/') + if path: + path = path.strip('/') + return os.path.join(self.base_url, version_number, path) + else: + return os.path.join(self.base_url, version_number) + '/' diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 53dab22e8..0eb47044e 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -13,6 +13,7 @@ from nova import wsgi XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' + XMLNS_ATOM = 'http://www.w3.org/2005/Atom' LOG = logging.getLogger('nova.api.openstack.wsgi') @@ -386,6 +387,8 @@ class XMLDictSerializer(DictSerializer): link_node = xml_doc.createElement('atom:link') link_node.setAttribute('rel', link['rel']) link_node.setAttribute('href', link['href']) + if 'type' in link: + link_node.setAttribute('type', link['type']) link_nodes.append(link_node) return link_nodes diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py new file mode 100644 index 000000000..97ad90ada --- /dev/null +++ b/nova/api/openstack/xmlutil.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
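generate_href() above now normalizes slashes and can append the request path. A standalone copy of that logic plus a usage sketch; the base URL and path values are hypothetical:

import os.path


def generate_href(base_url, version_number, path=None):
    # Same logic as ViewBuilder.generate_href() in the diff above,
    # pulled out as a plain function so it can be exercised directly.
    version_number = version_number.strip('/')
    if path:
        path = path.strip('/')
        return os.path.join(base_url, version_number, path)
    else:
        return os.path.join(base_url, version_number) + '/'


print generate_href('http://localhost/', 'v1.1')
# -> http://localhost/v1.1/
print generate_href('http://localhost/', 'v1.1', '/servers/detail')
# -> http://localhost/v1.1/servers/detail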
+ +import os.path + +from lxml import etree + +from nova import utils + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' +XMLNS_ATOM = 'http://www.w3.org/2005/Atom' + + +def validate_schema(xml, schema_name): + if type(xml) is str: + xml = etree.fromstring(xml) + schema_path = os.path.join(utils.novadir(), + 'nova/api/openstack/schemas/v1.1/%s.rng' % schema_name) + schema_doc = etree.parse(schema_path) + relaxng = etree.RelaxNG(schema_doc) + relaxng.assertValid(xml) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index f7fd87bcd..a2bf267ed 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -166,7 +166,7 @@ class Controller(object): return self.helper._get_server_admin_password_old_style(server) -class ControllerV11(object): +class ControllerV11(Controller): """Controller for 1.1 Zone resources.""" def _get_server_admin_password(self, server): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index b6131fb7f..6205cfb56 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -518,6 +518,14 @@ class AuthManager(object): return drv.get_user_roles(User.safe_id(user), Project.safe_id(project)) + def get_active_roles(self, user, project=None): + """Get all active roles for context""" + if project: + roles = FLAGS.allowed_roles + ['projectmanager'] + else: + roles = FLAGS.global_roles + return [role for role in roles if self.has_role(user, role, project)] + def get_project(self, pid): """Get project object by id""" with self.driver() as drv: @@ -730,10 +738,6 @@ class AuthManager(object): with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) - @staticmethod - def get_key_pairs(context): - return db.key_pair_get_all_by_user(context.elevated(), context.user_id) - def get_credentials(self, user, project=None, use_dmz=True): """Get credential zip for user in project""" if not isinstance(user, User): @@ -785,7 +789,7 @@ class AuthManager(object): return read_buffer def get_environment_rc(self, user, project=None, use_dmz=True): - """Get credential zip for user in project""" + """Get environment rc for user in project""" if not isinstance(user, User): user = self.get_user(user) if project is None: diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index d05c099d7..978ffb210 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -16,3 +16,4 @@ export NOVA_API_KEY="%(access)s" export NOVA_USERNAME="%(user)s" export NOVA_PROJECT_ID="%(project)s" export NOVA_URL="%(os)s" +export NOVA_VERSION="1.1" diff --git a/nova/block_device.py b/nova/block_device.py new file mode 100644 index 000000000..8d95e0029 --- /dev/null +++ b/nova/block_device.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp> +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
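The new xmlutil.validate_schema() helper above is a thin wrapper over lxml's RelaxNG support. A self-contained sketch of the same parse/RelaxNG/assertValid flow, with an inline schema and document invented for illustration (the real schemas live under nova/api/openstack/schemas/v1.1/):

from lxml import etree

# Both the schema and the document below are made-up examples.
RELAXNG_SCHEMA = """
<element name="version" xmlns="http://relaxng.org/ns/structure/1.0">
  <attribute name="id"/>
</element>
"""

relaxng = etree.RelaxNG(etree.fromstring(RELAXNG_SCHEMA))

doc = etree.fromstring('<version id="v1.1"/>')
relaxng.assertValid(doc)  # raises etree.DocumentInvalid if it does not match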
+ +import re + + +def properties_root_device_name(properties): + """get root device name from image meta data. + If it isn't specified, return None. + """ + root_device_name = None + + # NOTE(yamahata): see image_service.s3.s3create() + for bdm in properties.get('mappings', []): + if bdm['virtual'] == 'root': + root_device_name = bdm['device'] + + # NOTE(yamahata): register_image's command line can override + # <machine>.manifest.xml + if 'root_device_name' in properties: + root_device_name = properties['root_device_name'] + + return root_device_name + + +_ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$') + + +def is_ephemeral(device_name): + return _ephemeral.match(device_name) + + +def ephemeral_num(ephemeral_name): + assert is_ephemeral(ephemeral_name) + return int(_ephemeral.sub('\\1', ephemeral_name)) + + +def is_swap_or_ephemeral(device_name): + return device_name == 'swap' or is_ephemeral(device_name) + + +def mappings_prepend_dev(mappings): + """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type""" + for m in mappings: + virtual = m['virtual'] + if (is_swap_or_ephemeral(virtual) and + (not m['device'].startswith('/'))): + m['device'] = '/dev/' + m['device'] + return mappings + + +_dev = re.compile('^/dev/') + + +def strip_dev(device_name): + """remove leading '/dev/'""" + return _dev.sub('', device_name) diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 7844d31e1..2c4673f9e 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -96,8 +96,8 @@ class CloudPipe(object): def launch_vpn_instance(self, project_id): LOG.debug(_("Launching VPN for %s") % (project_id)) project = self.manager.get_project(project_id) - ctxt = context.RequestContext(user=project.project_manager, - project=project) + ctxt = context.RequestContext(user=project.project_manager_id, + project=project.id) key_name = self.setup_key_pair(ctxt) group_name = self.setup_security_group(ctxt) @@ -112,11 +112,11 @@ class CloudPipe(object): security_group=[group_name]) def setup_security_group(self, context): - group_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix) - if db.security_group_exists(context, context.project.id, group_name): + group_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix) + if db.security_group_exists(context, context.project_id, group_name): return group_name - group = {'user_id': context.user.id, - 'project_id': context.project.id, + group = {'user_id': context.user_id, + 'project_id': context.project_id, 'name': group_name, 'description': 'Group for vpn'} group_ref = db.security_group_create(context, group) @@ -137,19 +137,16 @@ class CloudPipe(object): return group_name def setup_key_pair(self, context): - key_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix) + key_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix) try: - result = cloud._gen_key(context, context.user.id, key_name) + result = cloud._gen_key(context, context.user_id, key_name) private_key = result['private_key'] - try: - key_dir = os.path.join(FLAGS.keys_path, context.user.id) - if not os.path.exists(key_dir): - os.makedirs(key_dir) - key_path = os.path.join(key_dir, '%s.pem' % key_name) - with open(key_path, 'w') as f: - f.write(private_key) - except: - pass - except exception.Duplicate: + key_dir = os.path.join(FLAGS.keys_path, context.user_id) + if not os.path.exists(key_dir): + os.makedirs(key_dir) + key_path = os.path.join(key_dir, '%s.pem' % key_name) + with open(key_path, 'w') as f: + f.write(private_key) + except (exception.Duplicate, 
os.error, IOError): pass return key_name diff --git a/nova/compute/api.py b/nova/compute/api.py index adc023a4d..09ba1ee76 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -22,6 +22,7 @@ import eventlet import re import time +from nova import block_device from nova import db from nova import exception from nova import flags @@ -32,7 +33,6 @@ from nova import quota from nova import rpc from nova import utils from nova import volume -from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.compute import power_state from nova.compute.utils import terminate_volumes @@ -218,7 +218,7 @@ class API(base.Base): if reservation_id is None: reservation_id = utils.generate_uid('r') - root_device_name = ec2utils.properties_root_device_name( + root_device_name = block_device.properties_root_device_name( image['properties']) base_options = { @@ -250,34 +250,64 @@ class API(base.Base): return (num_instances, base_options, image) - def _update_image_block_device_mapping(self, elevated_context, instance_id, + @staticmethod + def _ephemeral_size(instance_type, ephemeral_name): + num = block_device.ephemeral_num(ephemeral_name) + + # TODO(yamahata): ephemeralN where N > 0 + # Only ephemeral0 is allowed for now because InstanceTypes + # table only allows single local disk, local_gb. + # In order to enhance it, we need to add a new columns to + # instance_types table. + if num > 0: + return 0 + + return instance_type.get('local_gb') + + def _update_image_block_device_mapping(self, elevated_context, + instance_type, instance_id, mappings): """tell vm driver to create ephemeral/swap device at boot time by updating BlockDeviceMapping """ - for bdm in ec2utils.mappings_prepend_dev(mappings): + instance_type = (instance_type or + instance_types.get_default_instance_type()) + + for bdm in block_device.mappings_prepend_dev(mappings): LOG.debug(_("bdm %s"), bdm) virtual_name = bdm['virtual'] if virtual_name == 'ami' or virtual_name == 'root': continue - assert (virtual_name == 'swap' or - virtual_name.startswith('ephemeral')) + if not block_device.is_swap_or_ephemeral(virtual_name): + continue + + size = 0 + if virtual_name == 'swap': + size = instance_type.get('swap', 0) + elif block_device.is_ephemeral(virtual_name): + size = self._ephemeral_size(instance_type, virtual_name) + + if size == 0: + continue + values = { 'instance_id': instance_id, 'device_name': bdm['device'], - 'virtual_name': virtual_name, } + 'virtual_name': virtual_name, + 'volume_size': size} self.db.block_device_mapping_update_or_create(elevated_context, values) - def _update_block_device_mapping(self, elevated_context, instance_id, + def _update_block_device_mapping(self, elevated_context, + instance_type, instance_id, block_device_mapping): """tell vm driver to attach volume at boot time by updating BlockDeviceMapping """ + LOG.debug(_("block_device_mapping %s"), block_device_mapping) for bdm in block_device_mapping: - LOG.debug(_('bdm %s'), bdm) assert 'device_name' in bdm values = {'instance_id': instance_id} @@ -286,10 +316,18 @@ class API(base.Base): 'no_device'): values[key] = bdm.get(key) + virtual_name = bdm.get('virtual_name') + if (virtual_name is not None and + block_device.is_ephemeral(virtual_name)): + size = self._ephemeral_size(instance_type, virtual_name) + if size == 0: + continue + values['volume_size'] = size + # NOTE(yamahata): NoDevice eliminates devices defined in image # files by command line option. 
# (--block-device-mapping) - if bdm.get('virtual_name') == 'NoDevice': + if virtual_name == 'NoDevice': values['no_device'] = True for k in ('delete_on_termination', 'volume_id', 'snapshot_id', 'volume_id', 'volume_size', @@ -299,8 +337,8 @@ class API(base.Base): self.db.block_device_mapping_update_or_create(elevated_context, values) - def create_db_entry_for_new_instance(self, context, image, base_options, - security_group, block_device_mapping, num=1): + def create_db_entry_for_new_instance(self, context, instance_type, image, + base_options, security_group, block_device_mapping, num=1): """Create an entry in the DB for this new instance, including any related table updates (such as security group, etc). @@ -333,12 +371,12 @@ class API(base.Base): security_group_id) # BlockDeviceMapping table - self._update_image_block_device_mapping(elevated, instance_id, - image['properties'].get('mappings', [])) - self._update_block_device_mapping(elevated, instance_id, + self._update_image_block_device_mapping(elevated, instance_type, + instance_id, image['properties'].get('mappings', [])) + self._update_block_device_mapping(elevated, instance_type, instance_id, image['properties'].get('block_device_mapping', [])) # override via command line option - self._update_block_device_mapping(elevated, instance_id, + self._update_block_device_mapping(elevated, instance_type, instance_id, block_device_mapping) # Set sane defaults if not specified @@ -356,6 +394,7 @@ class API(base.Base): instance_type, zone_blob, availability_zone, injected_files, admin_password, + image, instance_id=None, num_instances=1): """Send the run_instance request to the schedulers for processing.""" pid = context.project_id @@ -369,6 +408,7 @@ class API(base.Base): filter_class = 'nova.scheduler.host_filter.InstanceTypeFilter' request_spec = { + 'image': image, 'instance_properties': base_options, 'instance_type': instance_type, 'filter': filter_class, @@ -411,6 +451,7 @@ class API(base.Base): instance_type, zone_blob, availability_zone, injected_files, admin_password, + image, num_instances=num_instances) return base_options['reservation_id'] @@ -449,7 +490,8 @@ class API(base.Base): instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): - instance = self.create_db_entry_for_new_instance(context, image, + instance = self.create_db_entry_for_new_instance(context, + instance_type, image, base_options, security_group, block_device_mapping, num=num) instances.append(instance) @@ -459,6 +501,7 @@ class API(base.Base): instance_type, zone_blob, availability_zone, injected_files, admin_password, + image, instance_id=instance_id) return [dict(x.iteritems()) for x in instances] @@ -687,7 +730,7 @@ class API(base.Base): raise instances = None elif project_id or not context.is_admin: - if not context.project: + if not context.project_id: instances = self.db.instance_get_all_by_user( context, context.user_id) else: @@ -886,7 +929,7 @@ class API(base.Base): params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance_ref['uuid'], - migration_ref['source_compute'], + migration_ref['dest_compute'], params=params) self.db.migration_update(context, migration_ref['id'], @@ -906,7 +949,7 @@ class API(base.Base): params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance_ref['uuid'], - migration_ref['dest_compute'], + migration_ref['source_compute'], params=params) self.db.migration_update(context, 
migration_ref['id'], @@ -938,25 +981,22 @@ class API(base.Base): LOG.debug(_("Old instance type %(current_instance_type_name)s, " " new instance type %(new_instance_type_name)s") % locals()) if not new_instance_type: - raise exception.ApiError(_("Requested flavor %(flavor_id)d " - "does not exist") % locals()) + raise exception.FlavorNotFound(flavor_id=flavor_id) current_memory_mb = current_instance_type['memory_mb'] new_memory_mb = new_instance_type['memory_mb'] if current_memory_mb > new_memory_mb: - raise exception.ApiError(_("Invalid flavor: cannot downsize" - "instances")) + raise exception.CannotResizeToSmallerSize() if (current_memory_mb == new_memory_mb) and flavor_id: - raise exception.ApiError(_("Invalid flavor: cannot use" - "the same flavor. ")) + raise exception.CannotResizeToSameSize() instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, {"method": "prep_resize", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_ref['uuid'], - "flavor_id": new_instance_type['id']}}) + "instance_type_id": new_instance_type['id']}}) @scheduler_api.reroute_compute("add_fixed_ip") def add_fixed_ip(self, context, instance_id, network_id): @@ -994,7 +1034,12 @@ class API(base.Base): def set_host_enabled(self, context, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._call_compute_message("set_host_enabled", context, - instance_id=None, host=host, params={"enabled": enabled}) + host=host, params={"enabled": enabled}) + + def host_power_action(self, context, host, action): + """Reboots, shuts down or powers up the host.""" + return self._call_compute_message("host_power_action", context, + host=host, params={"action": action}) @scheduler_api.reroute_compute("diagnostics") def get_diagnostics(self, context, instance_id): diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py index c13a629a9..824416514 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -132,11 +132,8 @@ def get_instance_type_by_name(name): # flavors. 
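compute.API.resize() above now raises specific exceptions instead of the generic ApiError. A distilled sketch of just the memory_mb comparison, with stand-in exception classes and dictionaries (the flavor_id guard on the same-size check is omitted here):

class CannotResizeToSmallerSize(Exception):
    pass


class CannotResizeToSameSize(Exception):
    pass


def check_resize(current_instance_type, new_instance_type):
    # Mirrors the memory_mb checks performed in compute.API.resize().
    current_memory_mb = current_instance_type['memory_mb']
    new_memory_mb = new_instance_type['memory_mb']
    if current_memory_mb > new_memory_mb:
        raise CannotResizeToSmallerSize()
    if current_memory_mb == new_memory_mb:
        raise CannotResizeToSameSize()


check_resize({'memory_mb': 2048}, {'memory_mb': 4096})  # growing: no exception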
def get_instance_type_by_flavor_id(flavor_id): """Retrieve instance type by flavor_id.""" - if flavor_id is None: - return get_default_instance_type() + ctxt = context.get_admin_context() try: - ctxt = context.get_admin_context() return db.instance_type_get_by_flavor_id(ctxt, flavor_id) - except exception.DBError, e: - LOG.exception(_('DB error: %s') % e) - raise exception.ApiError(_("Unknown flavor: %s") % flavor_id) + except ValueError: + raise exception.FlavorNotFound(flavor_id=flavor_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 173469bc3..d38213083 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -44,6 +44,8 @@ import functools from eventlet import greenthread +import nova.context +from nova import block_device from nova import exception from nova import flags import nova.image @@ -147,6 +149,31 @@ class ComputeManager(manager.SchedulerDependentManager): def init_host(self): """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) + context = nova.context.get_admin_context() + instances = self.db.instance_get_all_by_host(context, self.host) + for instance in instances: + inst_name = instance['name'] + db_state = instance['state'] + drv_state = self._update_state(context, instance['id']) + + expect_running = db_state == power_state.RUNNING \ + and drv_state != db_state + + LOG.debug(_('Current state of %(inst_name)s is %(drv_state)s, ' + 'state in DB is %(db_state)s.'), locals()) + + if (expect_running and FLAGS.resume_guests_state_on_host_boot)\ + or FLAGS.start_guests_on_host_boot: + LOG.info(_('Rebooting instance %(inst_name)s after ' + 'nova-compute restart.'), locals()) + self.reboot_instance(context, instance['id']) + elif drv_state == power_state.RUNNING: + # Hyper-V and VMWareAPI drivers will raise and exception + try: + self.driver.ensure_filtering_rules_for_instance(instance) + except NotImplementedError: + LOG.warning(_('Hypervisor driver does not ' + 'support firewall rules')) def _update_state(self, context, instance_id, state=None): """Update the state of an instance from the driver info.""" @@ -154,6 +181,7 @@ class ComputeManager(manager.SchedulerDependentManager): if state is None: try: + LOG.debug(_('Checking state of %s'), instance_ref['name']) info = self.driver.get_info(instance_ref['name']) except exception.NotFound: info = None @@ -164,6 +192,7 @@ class ComputeManager(manager.SchedulerDependentManager): state = power_state.FAILED self.db.instance_set_state(context, instance_id, state) + return state def _update_launched_at(self, context, instance_id, launched_at=None): """Update the launched_at parameter of the given instance.""" @@ -232,6 +261,8 @@ class ComputeManager(manager.SchedulerDependentManager): volume_api = volume.API() block_device_mapping = [] + swap = None + ephemerals = [] for bdm in self.db.block_device_mapping_get_all_by_instance( context, instance_id): LOG.debug(_("setting up bdm %s"), bdm) @@ -239,11 +270,18 @@ class ComputeManager(manager.SchedulerDependentManager): if bdm['no_device']: continue if bdm['virtual_name']: - # TODO(yamahata): - # block devices for swap and ephemeralN will be - # created by virt driver locally in compute node. 
- assert (bdm['virtual_name'] == 'swap' or - bdm['virtual_name'].startswith('ephemeral')) + virtual_name = bdm['virtual_name'] + device_name = bdm['device_name'] + assert block_device.is_swap_or_ephemeral(virtual_name) + if virtual_name == 'swap': + swap = {'device_name': device_name, + 'swap_size': bdm['volume_size']} + elif block_device.is_ephemeral(virtual_name): + eph = {'num': block_device.ephemeral_num(virtual_name), + 'virtual_name': virtual_name, + 'device_name': device_name, + 'size': bdm['volume_size']} + ephemerals.append(eph) continue if ((bdm['snapshot_id'] is not None) and @@ -279,7 +317,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'mount_device': bdm['device_name']}) - return block_device_mapping + return (swap, ephemerals, block_device_mapping) def _run_instance(self, context, instance_id, **kwargs): """Launch a new instance with specified options.""" @@ -320,13 +358,21 @@ class ComputeManager(manager.SchedulerDependentManager): # all vif creation and network injection, maybe this is correct network_info = [] - bd_mapping = self._setup_block_device_mapping(context, instance_id) + (swap, ephemerals, + block_device_mapping) = self._setup_block_device_mapping( + context, instance_id) + block_device_info = { + 'root_device_name': instance['root_device_name'], + 'swap': swap, + 'ephemerals': ephemerals, + 'block_device_mapping': block_device_mapping} # TODO(vish) check to make sure the availability zone matches self._update_state(context, instance_id, power_state.BUILDING) try: - self.driver.spawn(instance, network_info, bd_mapping) + self.driver.spawn(context, instance, + network_info, block_device_info) except Exception as ex: # pylint: disable=W0702 msg = _("Instance '%(instance_id)s' failed to spawn. Is " "virtualization enabled in the BIOS? 
Details: " @@ -433,7 +479,7 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self.network_api.get_instance_nw_info(context, instance_ref) bd_mapping = self._setup_block_device_mapping(context, instance_id) - self.driver.spawn(instance_ref, network_info, bd_mapping) + self.driver.spawn(context, instance_ref, network_info, bd_mapping) self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) @@ -501,7 +547,7 @@ class ComputeManager(manager.SchedulerDependentManager): 'instance: %(instance_id)s (state: %(state)s ' 'expected: %(running)s)') % locals()) - self.driver.snapshot(instance_ref, image_id) + self.driver.snapshot(context, instance_ref, image_id) if image_type == 'snapshot': if rotation: @@ -660,7 +706,7 @@ class ComputeManager(manager.SchedulerDependentManager): _update_state = lambda result: self._update_state_callback( self, context, instance_id, result) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.rescue(instance_ref, _update_state, network_info) + self.driver.rescue(context, instance_ref, _update_state, network_info) self._update_state(context, instance_id) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -720,7 +766,8 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref['host']) rpc.cast(context, topic, {'method': 'finish_revert_resize', - 'args': {'migration_id': migration_ref['id']}, + 'args': {'instance_id': instance_ref['uuid'], + 'migration_id': migration_ref['id']}, }) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -736,8 +783,8 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get_by_uuid(context, migration_ref.instance_uuid) - instance_type = self.db.instance_type_get_by_flavor_id(context, - migration_ref['old_flavor_id']) + instance_type = self.db.instance_type_get(context, + migration_ref['old_instance_type_id']) # Just roll back the record. There's no need to resize down since # the 'old' VM already has the preferred attributes @@ -747,7 +794,7 @@ class ComputeManager(manager.SchedulerDependentManager): local_gb=instance_type['local_gb'], instance_type_id=instance_type['id'])) - self.driver.revert_resize(instance_ref) + self.driver.revert_migration(instance_ref) self.db.migration_update(context, migration_id, {'status': 'reverted'}) usage_info = utils.usage_from_instance(instance_ref) @@ -758,7 +805,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock - def prep_resize(self, context, instance_id, flavor_id): + def prep_resize(self, context, instance_id, instance_type_id): """Initiates the process of moving a running instance to another host. Possibly changes the RAM and disk size in the process. 
@@ -777,16 +824,16 @@ class ComputeManager(manager.SchedulerDependentManager): old_instance_type = self.db.instance_type_get(context, instance_ref['instance_type_id']) - new_instance_type = self.db.instance_type_get_by_flavor_id(context, - flavor_id) + new_instance_type = self.db.instance_type_get(context, + instance_type_id) migration_ref = self.db.migration_create(context, {'instance_uuid': instance_ref['uuid'], 'source_compute': instance_ref['host'], 'dest_compute': FLAGS.host, 'dest_host': self.driver.get_host_ip_addr(), - 'old_flavor_id': old_instance_type['flavorid'], - 'new_flavor_id': flavor_id, + 'old_instance_type_id': old_instance_type['id'], + 'new_instance_type_id': instance_type_id, 'status': 'pre-migrating'}) LOG.audit(_('instance %s: migrating'), instance_ref['uuid'], @@ -845,20 +892,27 @@ class ComputeManager(manager.SchedulerDependentManager): """ migration_ref = self.db.migration_get(context, migration_id) + + resize_instance = False instance_ref = self.db.instance_get_by_uuid(context, migration_ref.instance_uuid) - instance_type = self.db.instance_type_get_by_flavor_id(context, - migration_ref['new_flavor_id']) - self.db.instance_update(context, instance_ref.uuid, - dict(instance_type_id=instance_type['id'], - memory_mb=instance_type['memory_mb'], - vcpus=instance_type['vcpus'], - local_gb=instance_type['local_gb'])) + if migration_ref['old_instance_type_id'] != \ + migration_ref['new_instance_type_id']: + instance_type = self.db.instance_type_get(context, + migration_ref['new_instance_type_id']) + self.db.instance_update(context, instance_ref.uuid, + dict(instance_type_id=instance_type['id'], + memory_mb=instance_type['memory_mb'], + vcpus=instance_type['vcpus'], + local_gb=instance_type['local_gb'])) + resize_instance = True instance_ref = self.db.instance_get_by_uuid(context, instance_ref.uuid) + network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.finish_resize(instance_ref, disk_info, network_info) + self.driver.finish_migration(context, instance_ref, disk_info, + network_info, resize_instance) self.db.migration_update(context, migration_id, {'status': 'finished', }) @@ -922,8 +976,12 @@ class ComputeManager(manager.SchedulerDependentManager): result)) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) - def set_host_enabled(self, context, instance_id=None, host=None, - enabled=None): + def host_power_action(self, context, host=None, action=None): + """Reboots, shuts down or powers up the host.""" + return self.driver.host_power_action(host, action) + + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) + def set_host_enabled(self, context, host=None, enabled=None): """Sets the specified host's ability to accept new instances.""" return self.driver.set_host_enabled(host, enabled) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py deleted file mode 100644 index 9d8e2a25d..000000000 --- a/nova/compute/monitor.py +++ /dev/null @@ -1,435 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Instance Monitoring: - - Optionally may be run on each compute node. Provides RRD - based statistics and graphs and makes them internally available - in the object store. -""" - -import datetime -import os -import time - -import boto -import boto.s3 -import rrdtool -from twisted.internet import task -from twisted.application import service - -from nova import flags -from nova import log as logging -from nova import utils -from nova.virt import connection as virt_connection - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('monitoring_instances_delay', 5, - 'Sleep time between updates') -flags.DEFINE_integer('monitoring_instances_step', 300, - 'Interval of RRD updates') -flags.DEFINE_string('monitoring_rrd_path', '$state_path/monitor/instances', - 'Location of RRD files') - - -RRD_VALUES = { - 'cpu': [ - 'DS:cpu:GAUGE:600:0:100', - 'RRA:AVERAGE:0.5:1:800', - 'RRA:AVERAGE:0.5:6:800', - 'RRA:AVERAGE:0.5:24:800', - 'RRA:AVERAGE:0.5:288:800', - 'RRA:MAX:0.5:1:800', - 'RRA:MAX:0.5:6:800', - 'RRA:MAX:0.5:24:800', - 'RRA:MAX:0.5:288:800', - ], - 'net': [ - 'DS:rx:COUNTER:600:0:1250000', - 'DS:tx:COUNTER:600:0:1250000', - 'RRA:AVERAGE:0.5:1:800', - 'RRA:AVERAGE:0.5:6:800', - 'RRA:AVERAGE:0.5:24:800', - 'RRA:AVERAGE:0.5:288:800', - 'RRA:MAX:0.5:1:800', - 'RRA:MAX:0.5:6:800', - 'RRA:MAX:0.5:24:800', - 'RRA:MAX:0.5:288:800', - ], - 'disk': [ - 'DS:rd:COUNTER:600:U:U', - 'DS:wr:COUNTER:600:U:U', - 'RRA:AVERAGE:0.5:1:800', - 'RRA:AVERAGE:0.5:6:800', - 'RRA:AVERAGE:0.5:24:800', - 'RRA:AVERAGE:0.5:288:800', - 'RRA:MAX:0.5:1:800', - 'RRA:MAX:0.5:6:800', - 'RRA:MAX:0.5:24:800', - 'RRA:MAX:0.5:444:800', - ]} - - -utcnow = utils.utcnow - - -LOG = logging.getLogger('nova.compute.monitor') - - -def update_rrd(instance, name, data): - """ - Updates the specified RRD file. - """ - filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name) - - if not os.path.exists(filename): - init_rrd(instance, name) - - timestamp = int(time.mktime(utcnow().timetuple())) - rrdtool.update(filename, '%d:%s' % (timestamp, data)) - - -def init_rrd(instance, name): - """ - Initializes the specified RRD file. - """ - path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id) - - if not os.path.exists(path): - os.makedirs(path) - - filename = os.path.join(path, '%s.rrd' % name) - - if not os.path.exists(filename): - rrdtool.create( - filename, - '--step', '%d' % FLAGS.monitoring_instances_step, - '--start', '0', - *RRD_VALUES[name]) - - -def graph_cpu(instance, duration): - """ - Creates a graph of cpu usage for the specified instance and duration. 
- """ - path = instance.get_rrd_path() - filename = os.path.join(path, 'cpu-%s.png' % duration) - - rrdtool.graph( - filename, - '--disable-rrdtool-tag', - '--imgformat', 'PNG', - '--width', '400', - '--height', '120', - '--start', 'now-%s' % duration, - '--vertical-label', '% cpu used', - '-l', '0', - '-u', '100', - 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'), - 'AREA:cpu#eacc00:% CPU',) - - store_graph(instance.instance_id, filename) - - -def graph_net(instance, duration): - """ - Creates a graph of network usage for the specified instance and duration. - """ - path = instance.get_rrd_path() - filename = os.path.join(path, 'net-%s.png' % duration) - - rrdtool.graph( - filename, - '--disable-rrdtool-tag', - '--imgformat', 'PNG', - '--width', '400', - '--height', '120', - '--start', 'now-%s' % duration, - '--vertical-label', 'bytes/s', - '--logarithmic', - '--units', 'si', - '--lower-limit', '1000', - '--rigid', - 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'), - 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'), - 'AREA:rx#00FF00:In traffic', - 'LINE1:tx#0000FF:Out traffic',) - - store_graph(instance.instance_id, filename) - - -def graph_disk(instance, duration): - """ - Creates a graph of disk usage for the specified duration. - """ - path = instance.get_rrd_path() - filename = os.path.join(path, 'disk-%s.png' % duration) - - rrdtool.graph( - filename, - '--disable-rrdtool-tag', - '--imgformat', 'PNG', - '--width', '400', - '--height', '120', - '--start', 'now-%s' % duration, - '--vertical-label', 'bytes/s', - '--logarithmic', - '--units', 'si', - '--lower-limit', '1000', - '--rigid', - 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'), - 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'), - 'AREA:rd#00FF00:Read', - 'LINE1:wr#0000FF:Write',) - - store_graph(instance.instance_id, filename) - - -def store_graph(instance_id, filename): - """ - Transmits the specified graph file to internal object store on cloud - controller. - """ - # TODO(devcamcar): Need to use an asynchronous method to make this - # connection. If boto has some separate method that generates - # the request it would like to make and another method to parse - # the response we can make our own client that does the actual - # request and hands it off to the response parser. - s3 = boto.s3.connection.S3Connection( - aws_access_key_id=FLAGS.aws_access_key_id, - aws_secret_access_key=FLAGS.aws_secret_access_key, - is_secure=False, - calling_format=boto.s3.connection.OrdinaryCallingFormat(), - port=FLAGS.s3_port, - host=FLAGS.s3_host) - bucket_name = '_%s.monitor' % instance_id - - # Object store isn't creating the bucket like it should currently - # when it is first requested, so have to catch and create manually. - try: - bucket = s3.get_bucket(bucket_name) - except Exception: - bucket = s3.create_bucket(bucket_name) - - key = boto.s3.Key(bucket) - key.key = os.path.basename(filename) - key.set_contents_from_filename(filename) - - -class Instance(object): - def __init__(self, conn, instance_id): - self.conn = conn - self.instance_id = instance_id - self.last_updated = datetime.datetime.min - self.cputime = 0 - self.cputime_last_updated = None - - init_rrd(self, 'cpu') - init_rrd(self, 'net') - init_rrd(self, 'disk') - - def needs_update(self): - """ - Indicates whether this instance is due to have its statistics updated. 
- """ - delta = utcnow() - self.last_updated - return delta.seconds >= FLAGS.monitoring_instances_step - - def update(self): - """ - Updates the instances statistics and stores the resulting graphs - in the internal object store on the cloud controller. - """ - LOG.debug(_('updating %s...'), self.instance_id) - - try: - data = self.fetch_cpu_stats() - if data is not None: - LOG.debug('CPU: %s', data) - update_rrd(self, 'cpu', data) - - data = self.fetch_net_stats() - LOG.debug('NET: %s', data) - update_rrd(self, 'net', data) - - data = self.fetch_disk_stats() - LOG.debug('DISK: %s', data) - update_rrd(self, 'disk', data) - - # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls - # and make the methods @defer.inlineCallbacks. - graph_cpu(self, '1d') - graph_cpu(self, '1w') - graph_cpu(self, '1m') - - graph_net(self, '1d') - graph_net(self, '1w') - graph_net(self, '1m') - - graph_disk(self, '1d') - graph_disk(self, '1w') - graph_disk(self, '1m') - except Exception: - LOG.exception(_('unexpected error during update')) - - self.last_updated = utcnow() - - def get_rrd_path(self): - """ - Returns the path to where RRD files are stored. - """ - return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id) - - def fetch_cpu_stats(self): - """ - Returns cpu usage statistics for this instance. - """ - info = self.conn.get_info(self.instance_id) - - # Get the previous values. - cputime_last = self.cputime - cputime_last_updated = self.cputime_last_updated - - # Get the raw CPU time used in nanoseconds. - self.cputime = float(info['cpu_time']) - self.cputime_last_updated = utcnow() - - LOG.debug('CPU: %d', self.cputime) - - # Skip calculation on first pass. Need delta to get a meaningful value. - if cputime_last_updated is None: - return None - - # Calculate the number of seconds between samples. - d = self.cputime_last_updated - cputime_last_updated - t = d.days * 86400 + d.seconds - - LOG.debug('t = %d', t) - - # Calculate change over time in number of nanoseconds of CPU time used. - cputime_delta = self.cputime - cputime_last - - LOG.debug('cputime_delta = %s', cputime_delta) - - # Get the number of virtual cpus in this domain. - vcpus = int(info['num_cpu']) - - LOG.debug('vcpus = %d', vcpus) - - # Calculate CPU % used and cap at 100. - return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) - - def fetch_disk_stats(self): - """ - Returns disk usage statistics for this instance. - """ - rd = 0 - wr = 0 - - disks = self.conn.get_disks(self.instance_id) - - # Aggregate the read and write totals. - for disk in disks: - try: - rd_req, rd_bytes, wr_req, wr_bytes, errs = \ - self.conn.block_stats(self.instance_id, disk) - rd += rd_bytes - wr += wr_bytes - except TypeError: - iid = self.instance_id - LOG.error(_('Cannot get blockstats for "%(disk)s"' - ' on "%(iid)s"') % locals()) - raise - - return '%d:%d' % (rd, wr) - - def fetch_net_stats(self): - """ - Returns network usage statistics for this instance. - """ - rx = 0 - tx = 0 - - interfaces = self.conn.get_interfaces(self.instance_id) - - # Aggregate the in and out totals. - for interface in interfaces: - try: - stats = self.conn.interface_stats(self.instance_id, interface) - rx += stats[0] - tx += stats[4] - except TypeError: - iid = self.instance_id - LOG.error(_('Cannot get ifstats for "%(interface)s"' - ' on "%(iid)s"') % locals()) - raise - - return '%d:%d' % (rx, tx) - - -class InstanceMonitor(object, service.Service): - """ - Monitors the running instances of the current machine. 
- """ - - def __init__(self): - """ - Initialize the monitoring loop. - """ - self._instances = {} - self._loop = task.LoopingCall(self.updateInstances) - - def startService(self): - self._instances = {} - self._loop.start(interval=FLAGS.monitoring_instances_delay) - service.Service.startService(self) - - def stopService(self): - self._loop.stop() - service.Service.stopService(self) - - def updateInstances(self): - """ - Update resource usage for all running instances. - """ - try: - conn = virt_connection.get_connection(read_only=True) - except Exception, exn: - LOG.exception(_('unexpected exception getting connection')) - time.sleep(FLAGS.monitoring_instances_delay) - return - - domain_ids = conn.list_instances() - try: - self.updateInstances_(conn, domain_ids) - except Exception, exn: - LOG.exception('updateInstances_') - - def updateInstances_(self, conn, domain_ids): - for domain_id in domain_ids: - if not domain_id in self._instances: - instance = Instance(conn, domain_id) - self._instances[domain_id] = instance - LOG.debug(_('Found instance: %s'), domain_id) - - for key in self._instances.keys(): - instance = self._instances[key] - if instance.needs_update(): - instance.update() diff --git a/nova/context.py b/nova/context.py index 99085ed75..b917a1d81 100644 --- a/nova/context.py +++ b/nova/context.py @@ -18,9 +18,8 @@ """RequestContext: context for requests that persist through all of nova.""" -import random +import uuid -from nova import exception from nova import utils @@ -31,86 +30,54 @@ class RequestContext(object): """ - def __init__(self, user, project, is_admin=None, read_deleted=False, - remote_address=None, timestamp=None, request_id=None): - if hasattr(user, 'id'): - self._user = user - self.user_id = user.id - else: - self._user = None - self.user_id = user - if hasattr(project, 'id'): - self._project = project - self.project_id = project.id - else: - self._project = None - self.project_id = project - if is_admin is None: - if self.user_id and self.user: - self.is_admin = self.user.is_admin() - else: - self.is_admin = False - else: - self.is_admin = is_admin + def __init__(self, user_id, project_id, is_admin=None, read_deleted=False, + roles=None, remote_address=None, timestamp=None, + request_id=None, auth_token=None): + self.user_id = user_id + self.project_id = project_id + self.roles = roles or [] + self.is_admin = is_admin + if self.is_admin is None: + self.admin = 'admin' in self.roles self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = utils.utcnow() - if isinstance(timestamp, str) or isinstance(timestamp, unicode): - timestamp = utils.parse_isotime(timestamp) + if isinstance(timestamp, basestring): + timestamp = utils.parse_strtime(timestamp) self.timestamp = timestamp if not request_id: - chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-' - request_id = ''.join([random.choice(chars) for x in xrange(20)]) + request_id = unicode(uuid.uuid4()) self.request_id = request_id - - @property - def user(self): - # NOTE(vish): Delay import of manager, so that we can import this - # file from manager. - from nova.auth import manager - if not self._user: - try: - self._user = manager.AuthManager().get_user(self.user_id) - except exception.NotFound: - pass - return self._user - - @property - def project(self): - # NOTE(vish): Delay import of manager, so that we can import this - # file from manager. 
- from nova.auth import manager - if not self._project: - try: - auth_manager = manager.AuthManager() - self._project = auth_manager.get_project(self.project_id) - except exception.NotFound: - pass - return self._project + self.auth_token = auth_token def to_dict(self): - return {'user': self.user_id, - 'project': self.project_id, + return {'user_id': self.user_id, + 'project_id': self.project_id, 'is_admin': self.is_admin, 'read_deleted': self.read_deleted, + 'roles': self.roles, 'remote_address': self.remote_address, - 'timestamp': utils.isotime(self.timestamp), - 'request_id': self.request_id} + 'timestamp': utils.strtime(self.timestamp), + 'request_id': self.request_id, + 'auth_token': self.auth_token} @classmethod def from_dict(cls, values): return cls(**values) - def elevated(self, read_deleted=False): + def elevated(self, read_deleted=None): """Return a version of this context with admin flag set.""" - return RequestContext(self.user_id, - self.project_id, - True, - read_deleted, - self.remote_address, - self.timestamp, - self.request_id) + rd = self.read_deleted if read_deleted is None else read_deleted + return RequestContext(user_id=self.user_id, + project_id=self.project_id, + is_admin=True, + read_deleted=rd, + roles=self.roles, + remote_address=self.remote_address, + timestamp=self.timestamp, + request_id=self.request_id, + auth_token=self.auth_token) def get_admin_context(read_deleted=False): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d7810098a..e41bca8bd 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -20,6 +20,7 @@ Implementation of SQLAlchemy backend. """ import warnings +from nova import block_device from nova import db from nova import exception from nova import flags @@ -62,7 +63,7 @@ def is_user_context(context): def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): - if not context.project: + if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() @@ -71,7 +72,7 @@ def authorize_project_context(context, project_id): def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): - if not context.user: + if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() @@ -1312,7 +1313,7 @@ def instance_get_fixed_addresses_v6(context, instance_id): # combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs]) # return list containing ipv6 address for each tuple - return [ipv6.to_global_ipv6(*t) for t in prefix_mac_tuples] + return [ipv6.to_global(*t) for t in prefix_mac_tuples] @require_context @@ -1426,9 +1427,14 @@ def instance_action_create(context, values): def instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() + + if utils.is_uuid_like(instance_id): + instance = instance_get_by_uuid(context, instance_id, session) + instance_id = instance.id + return session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ - all() + all() ################### @@ -1680,7 +1686,10 @@ def network_get_by_bridge(context, bridge): def network_get_by_cidr(context, cidr): session = get_session() result = session.query(models.Network).\ - filter_by(cidr=cidr).first() + 
filter(or_(models.Network.cidr == cidr, + models.Network.cidr_v6 == cidr)).\ + filter_by(deleted=False).\ + first() if not result: raise exception.NetworkNotFoundForCidr(cidr=cidr) @@ -2264,6 +2273,20 @@ def block_device_mapping_update_or_create(context, values): else: result.update(values) + # NOTE(yamahata): same virtual device name can be specified multiple + # times. So delete the existing ones. + virtual_name = values['virtual_name'] + if (virtual_name is not None and + block_device.is_swap_or_ephemeral(virtual_name)): + session.query(models.BlockDeviceMapping).\ + filter_by(instance_id=values['instance_id']).\ + filter_by(virtual_name=virtual_name).\ + filter(models.BlockDeviceMapping.device_name != + values['device_name']).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + @require_context def block_device_mapping_get_all_by_instance(context, instance_id): @@ -3041,13 +3064,18 @@ def instance_type_get_by_name(context, name): @require_context def instance_type_get_by_flavor_id(context, id): """Returns a dict describing specific flavor_id""" + try: + flavor_id = int(id) + except ValueError: + raise exception.FlavorNotFound(flavor_id=id) + session = get_session() inst_type = session.query(models.InstanceTypes).\ options(joinedload('extra_specs')).\ - filter_by(flavorid=int(id)).\ + filter_by(flavorid=flavor_id).\ first() if not inst_type: - raise exception.FlavorNotFound(flavor_id=id) + raise exception.FlavorNotFound(flavor_id=flavor_id) else: return _dict_with_extra_specs(inst_type) @@ -3172,8 +3200,9 @@ def instance_metadata_delete_all(context, instance_id): @require_context @require_instance_exists -def instance_metadata_get_item(context, instance_id, key): - session = get_session() +def instance_metadata_get_item(context, instance_id, key, session=None): + if not session: + session = get_session() meta_result = session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ @@ -3199,7 +3228,7 @@ def instance_metadata_update_or_create(context, instance_id, metadata): try: meta_ref = instance_metadata_get_item(context, instance_id, key, session) - except: + except exception.InstanceMetadataNotFound, e: meta_ref = models.InstanceMetadata() meta_ref.update({"key": key, "value": value, "instance_id": instance_id, @@ -3247,8 +3276,8 @@ def agent_build_destroy(context, agent_build_id): with session.begin(): session.query(models.AgentBuild).\ filter_by(id=agent_build_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + update({'deleted': True, + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -3294,10 +3323,12 @@ def instance_type_extra_specs_delete(context, instance_type_id, key): @require_context -def instance_type_extra_specs_get_item(context, instance_type_id, key): - session = get_session() +def instance_type_extra_specs_get_item(context, instance_type_id, key, + session=None): + if not session: + session = get_session() - sppec_result = session.query(models.InstanceTypeExtraSpecs).\ + spec_result = session.query(models.InstanceTypeExtraSpecs).\ filter_by(instance_type_id=instance_type_id).\ filter_by(key=key).\ filter_by(deleted=False).\ @@ -3321,7 +3352,7 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, instance_type_id, key, session) - except: + except exception.InstanceTypeExtraSpecsNotFound, e: spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "instance_type_id": instance_type_id, diff --git 
a/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py new file mode 100644 index 000000000..f3244033b --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/036_change_flavor_id_in_migrations.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.from sqlalchemy import * + +from sqlalchemy import Column, Integer, MetaData, Table + + +meta = MetaData() + + +# +# Tables to alter +# +# + +old_flavor_id = Column('old_flavor_id', Integer()) +new_flavor_id = Column('new_flavor_id', Integer()) +old_instance_type_id = Column('old_instance_type_id', Integer()) +new_instance_type_id = Column('new_instance_type_id', Integer()) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) + migrations = Table('migrations', meta, autoload=True) + migrations.create_column(old_instance_type_id) + migrations.create_column(new_instance_type_id) + + # Convert flavor_id to instance_type_id + for instance_type in migrate_engine.execute(instance_types.select()): + migrate_engine.execute(migrations.update()\ + .where(migrations.c.old_flavor_id == instance_type.flavorid)\ + .values(old_instance_type_id=instance_type.id)) + migrate_engine.execute(migrations.update()\ + .where(migrations.c.new_flavor_id == instance_type.flavorid)\ + .values(new_instance_type_id=instance_type.id)) + + migrations.c.old_flavor_id.drop() + migrations.c.new_flavor_id.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instance_types = Table('instance_types', meta, autoload=True) + migrations = Table('migrations', meta, autoload=True) + migrations.create_column(old_flavor_id) + migrations.create_column(new_flavor_id) + + # Convert instance_type_id to flavor_id + for instance_type in migrate_engine.execute(instance_types.select()): + migrate_engine.execute(migrations.update()\ + .where(migrations.c.old_instance_type_id == instance_type.id)\ + .values(old_flavor_id=instance_type.flavorid)) + migrate_engine.execute(migrations.update()\ + .where(migrations.c.new_instance_type_id == instance_type.id)\ + .values(new_flavor_id=instance_type.flavorid)) + + migrations.c.old_instance_type_id.drop() + migrations.c.new_instance_type_id.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 9f1967d69..266758fb2 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -177,14 +177,6 @@ class Instance(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) - @property - def user(self): - return auth.manager.AuthManager().get_user(self.user_id) - - @property - def project(self): - return auth.manager.AuthManager().get_project(self.project_id) - image_ref = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) @@ -210,7 +202,7 @@ class 
Instance(BASE, NovaBase): hostname = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) - # aka flavor_id + # *not* flavor_id instance_type_id = Column(Integer) user_data = Column(Text) @@ -465,14 +457,6 @@ class SecurityGroup(BASE, NovaBase): 'Instance.deleted == False)', backref='security_groups') - @property - def user(self): - return auth.manager.AuthManager().get_user(self.user_id) - - @property - def project(self): - return auth.manager.AuthManager().get_project(self.project_id) - class SecurityGroupIngressRule(BASE, NovaBase): """Represents a rule in a security group.""" @@ -532,8 +516,8 @@ class Migration(BASE, NovaBase): source_compute = Column(String(255)) dest_compute = Column(String(255)) dest_host = Column(String(255)) - old_flavor_id = Column(Integer()) - new_flavor_id = Column(Integer()) + old_instance_type_id = Column(Integer()) + new_instance_type_id = Column(Integer()) instance_uuid = Column(String(255), ForeignKey('instances.uuid'), nullable=True) #TODO(_cerberus_): enum diff --git a/nova/exception.py b/nova/exception.py index 8c9b45a80..0d60cb0bf 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -25,6 +25,7 @@ SHOULD include dedicated exception logging. """ from functools import wraps +import sys from nova import log as logging @@ -96,6 +97,10 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None, try: return f(*args, **kw) except Exception, e: + # Save exception since it can be clobbered during processing + # below before we can re-raise + exc_info = sys.exc_info() + if notifier: payload = dict(args=args, exception=e) payload.update(kw) @@ -122,7 +127,9 @@ def wrap_exception(notifier=None, publisher_id=None, event_type=None, LOG.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) - raise + + # re-raise original exception since it may have been clobbered + raise exc_info[0], exc_info[1], exc_info[2] return wraps(f)(wrapped) return inner @@ -150,6 +157,10 @@ class NovaException(Exception): return self._error_string +class ImagePaginationFailed(NovaException): + message = _("Failed to paginate through images from image service") + + class VirtualInterfaceCreateException(NovaException): message = _("Virtual Interface creation failed") @@ -198,6 +209,12 @@ class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") +# Cannot be templated as the error syntax varies. +# msg needs to be constructed when raised. 
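wrap_exception() above now captures sys.exc_info() before notifying, so an error raised while building or sending the payload cannot clobber the original traceback. A minimal Python 2 sketch of the same save-and-re-raise pattern; notify() here is a stand-in, not Nova's notifier API:

import sys
from functools import wraps


def notify(exc):
    print 'would notify about: %r' % exc


def wrap_exception(f):
    @wraps(f)
    def wrapped(*args, **kw):
        try:
            return f(*args, **kw)
        except Exception, e:
            # Save the exception info first; anything raised below would
            # otherwise become "the" current exception.
            exc_info = sys.exc_info()
            try:
                notify(e)
            except Exception:
                pass
            # Re-raise the original exception with its original traceback
            # (Python 2 three-argument raise).
            raise exc_info[0], exc_info[1], exc_info[2]
    return wrapped


@wrap_exception
def boom():
    raise ValueError('original error')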
+class InvalidParameterValue(Invalid): + message = _("%(err)s") + + class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") @@ -692,3 +709,11 @@ class PasteConfigNotFound(NotFound): class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") + + +class CannotResizeToSameSize(NovaException): + message = _("When resizing, instances must change size!") + + +class CannotResizeToSmallerSize(NovaException): + message = _("Resizing to a smaller size is not supported.") diff --git a/nova/flags.py b/nova/flags.py index fa6d8860a..eb6366ed9 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -317,7 +317,7 @@ DEFINE_string('osapi_extensions_path', '/var/lib/nova/extensions', DEFINE_string('osapi_host', '$my_ip', 'ip of api server') DEFINE_string('osapi_scheme', 'http', 'prefix for openstack') DEFINE_integer('osapi_port', 8774, 'OpenStack API port') -DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack') +DEFINE_string('osapi_path', '/v1.1/', 'suffix for openstack') DEFINE_integer('osapi_max_limit', 1000, 'max number of items returned in a collection response') @@ -387,3 +387,8 @@ DEFINE_list('zone_capabilities', 'Key/Multi-value list representng capabilities of this zone') DEFINE_string('build_plan_encryption_key', None, '128bit (hex) encryption key for scheduler build plans.') + +DEFINE_bool('start_guests_on_host_boot', False, + 'Whether to restart guests when the host reboots') +DEFINE_bool('resume_guests_state_on_host_boot', False, + 'Whether to start guests, that was running before the host reboot') diff --git a/nova/image/__init__.py b/nova/image/__init__.py index a27d649d4..5447c8a3a 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -35,6 +35,7 @@ def _parse_image_ref(image_href): :param image_href: href of an image :returns: a tuple of the form (image_id, host, port) + :raises ValueError """ o = urlparse(image_href) @@ -72,7 +73,7 @@ def get_glance_client(image_href): try: (image_id, host, port) = _parse_image_ref(image_href) - except: + except ValueError: raise exception.InvalidImageRef(image_href=image_href) glance_client = GlanceClient(host, port) return (glance_client, image_id) diff --git a/nova/image/fake.py b/nova/image/fake.py index 28e912534..97af81711 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -45,9 +45,12 @@ class _FakeImageService(service.BaseImageService): 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, 'status': 'active', - 'container_format': 'ami', - 'disk_format': 'raw', + 'is_public': False, +# 'container_format': 'ami', +# 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel, 'architecture': 'x86_64'}} @@ -56,9 +59,12 @@ class _FakeImageService(service.BaseImageService): 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, 'status': 'active', - 'container_format': 'ami', - 'disk_format': 'raw', + 'is_public': True, +# 'container_format': 'ami', +# 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} @@ -66,9 +72,12 @@ class _FakeImageService(service.BaseImageService): 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, 'status': 'active', - 'container_format': 'ami', - 'disk_format': 'raw', + 'is_public': True, +# 'container_format': 'ami', +# 'disk_format': 'raw', 
'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} @@ -76,9 +85,12 @@ class _FakeImageService(service.BaseImageService): 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, 'status': 'active', - 'container_format': 'ami', - 'disk_format': 'raw', + 'is_public': True, +# 'container_format': 'ami', +# 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} @@ -86,9 +98,12 @@ class _FakeImageService(service.BaseImageService): 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, + 'deleted_at': None, + 'deleted': False, 'status': 'active', - 'container_format': 'ami', - 'disk_format': 'raw', + 'is_public': True, +# 'container_format': 'ami', +# 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} @@ -101,7 +116,11 @@ class _FakeImageService(service.BaseImageService): def index(self, context, filters=None, marker=None, limit=None): """Returns list of images.""" - return copy.deepcopy(self.images.values()) + retval = [] + for img in self.images.values(): + retval += [dict([(k, v) for k, v in img.iteritems() + if k in ['id', 'name']])] + return retval def detail(self, context, filters=None, marker=None, limit=None): """Return list of detailed image information.""" diff --git a/nova/image/glance.py b/nova/image/glance.py index 5c2dc957b..9060f6a91 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -19,7 +19,9 @@ from __future__ import absolute_import +import copy import datetime +import json import random from glance.common import exception as glance_exception @@ -83,43 +85,79 @@ class GlanceImageService(service.BaseImageService): client = property(_get_client, _set_client) - def index(self, context, filters=None, marker=None, limit=None): + def _set_client_context(self, context): + """Sets the client's auth token.""" + self.client.set_auth_token(context.auth_token) + + def index(self, context, **kwargs): """Calls out to Glance for a list of images available.""" - # NOTE(sirp): We need to use `get_images_detailed` and not - # `get_images` here because we need `is_public` and `properties` - # included so we can filter by user - filtered = [] - filters = filters or {} - if 'is_public' not in filters: - # NOTE(vish): don't filter out private images - filters['is_public'] = 'none' - image_metas = self.client.get_images_detailed(filters=filters, - marker=marker, - limit=limit) + params = self._extract_query_params(kwargs) + image_metas = self._get_images(context, **params) + + images = [] for image_meta in image_metas: + # NOTE(sirp): We need to use `get_images_detailed` and not + # `get_images` here because we need `is_public` and `properties` + # included so we can filter by user if self._is_image_available(context, image_meta): meta_subset = utils.subset_dict(image_meta, ('id', 'name')) - filtered.append(meta_subset) - return filtered + images.append(meta_subset) + return images - def detail(self, context, filters=None, marker=None, limit=None): + def detail(self, context, **kwargs): """Calls out to Glance for a list of detailed image information.""" - filtered = [] - filters = filters or {} - if 'is_public' not in filters: - # NOTE(vish): don't filter out private images - filters['is_public'] = 'none' - image_metas = self.client.get_images_detailed(filters=filters, - marker=marker, - limit=limit) + params = self._extract_query_params(kwargs) + image_metas = 
self._get_images(context, **params) + + images = [] for image_meta in image_metas: if self._is_image_available(context, image_meta): base_image_meta = self._translate_to_base(image_meta) - filtered.append(base_image_meta) - return filtered + images.append(base_image_meta) + return images + + def _extract_query_params(self, params): + _params = {} + accepted_params = ('filters', 'marker', 'limit', + 'sort_key', 'sort_dir') + for param in accepted_params: + if param in params: + _params[param] = params.get(param) + + return _params + + def _get_images(self, context, **kwargs): + """Get image entitites from images service""" + self._set_client_context(context) + + # ensure filters is a dict + kwargs['filters'] = kwargs.get('filters') or {} + # NOTE(vish): don't filter out private images + kwargs['filters'].setdefault('is_public', 'none') + + return self._fetch_images(self.client.get_images_detailed, **kwargs) + + def _fetch_images(self, fetch_func, **kwargs): + """Paginate through results from glance server""" + images = fetch_func(**kwargs) + + for image in images: + yield image + else: + # break out of recursive loop to end pagination + return + + try: + # attempt to advance the marker in order to fetch next page + kwargs['marker'] = images[-1]['id'] + except KeyError: + raise exception.ImagePaginationFailed() + + self._fetch_images(fetch_func, **kwargs) def show(self, context, image_id): """Returns a dict with image data for the given opaque image id.""" + self._set_client_context(context) try: image_meta = self.client.get_image_meta(image_id) except glance_exception.NotFound: @@ -143,6 +181,7 @@ class GlanceImageService(service.BaseImageService): def get(self, context, image_id, data): """Calls out to Glance for metadata and data and writes data.""" + self._set_client_context(context) try: image_meta, image_chunks = self.client.get_image(image_id) except glance_exception.NotFound: @@ -160,6 +199,7 @@ class GlanceImageService(service.BaseImageService): :raises: AlreadyExists if the image already exist. """ + self._set_client_context(context) # Translate Base -> Service LOG.debug(_('Creating image in Glance. Metadata passed in %s'), image_meta) @@ -182,8 +222,10 @@ class GlanceImageService(service.BaseImageService): :raises: ImageNotFound if the image does not exist. """ + self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) + image_meta = _convert_to_string(image_meta) try: image_meta = self.client.update_image(image_id, image_meta, data) except glance_exception.NotFound: @@ -198,6 +240,7 @@ class GlanceImageService(service.BaseImageService): :raises: ImageNotFound if the image does not exist. 
""" + self._set_client_context(context) # NOTE(vish): show is to check if image is available self.show(context, image_id) try: @@ -211,11 +254,19 @@ class GlanceImageService(service.BaseImageService): pass @classmethod + def _translate_to_service(cls, image_meta): + image_meta = super(GlanceImageService, + cls)._translate_to_service(image_meta) + image_meta = _convert_to_string(image_meta) + return image_meta + + @classmethod def _translate_to_base(cls, image_meta): """Override translation to handle conversion to datetime objects.""" image_meta = service.BaseImageService._propertify_metadata( image_meta, cls.SERVICE_IMAGE_ATTRS) image_meta = _convert_timestamps_to_datetimes(image_meta) + image_meta = _convert_from_string(image_meta) return image_meta @@ -241,3 +292,38 @@ def _parse_glance_iso8601_timestamp(timestamp): raise ValueError(_('%(timestamp)s does not follow any of the ' 'signatures: %(ISO_FORMATS)s') % locals()) + + +# TODO(yamahata): use block-device-mapping extension to glance +def _json_loads(properties, attr): + prop = properties[attr] + if isinstance(prop, basestring): + properties[attr] = json.loads(prop) + + +def _json_dumps(properties, attr): + prop = properties[attr] + if not isinstance(prop, basestring): + properties[attr] = json.dumps(prop) + + +_CONVERT_PROPS = ('block_device_mapping', 'mappings') + + +def _convert(method, metadata): + metadata = copy.deepcopy(metadata) # don't touch original metadata + properties = metadata.get('properties') + if properties: + for attr in _CONVERT_PROPS: + if attr in properties: + method(properties, attr) + + return metadata + + +def _convert_from_string(metadata): + return _convert(_json_loads, metadata) + + +def _convert_to_string(metadata): + return _convert(_json_dumps, metadata) diff --git a/nova/image/s3.py b/nova/image/s3.py index c313c7a13..ccbfa89cd 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -34,7 +34,6 @@ from nova import flags from nova import image from nova import log as logging from nova import utils -from nova.auth import manager from nova.image import service from nova.api.ec2 import ec2utils @@ -43,6 +42,10 @@ LOG = logging.getLogger("nova.image.s3") FLAGS = flags.FLAGS flags.DEFINE_string('image_decryption_dir', '/tmp', 'parent dir for tempdir used for image decryption') +flags.DEFINE_string('s3_access_key', 'notchecked', + 'access key to use for s3 server for images') +flags.DEFINE_string('s3_secret_key', 'notchecked', + 'secret key to use for s3 server for images') class S3ImageService(service.BaseImageService): @@ -82,11 +85,10 @@ class S3ImageService(service.BaseImageService): @staticmethod def _conn(context): - # TODO(vish): is there a better way to get creds to sign - # for the user? 
- access = manager.AuthManager().get_access_key(context.user, - context.project) - secret = str(context.user.secret) + # NOTE(vish): access and secret keys for s3 server are not + # checked in nova-objectstore + access = FLAGS.s3_access_key + secret = FLAGS.s3_secret_key calling = boto.s3.connection.OrdinaryCallingFormat() return boto.s3.connection.S3Connection(aws_access_key_id=access, aws_secret_access_key=secret, diff --git a/nova/log.py b/nova/log.py index 133ee45f8..222b8c5fb 100644 --- a/nova/log.py +++ b/nova/log.py @@ -43,8 +43,8 @@ from nova import version FLAGS = flags.FLAGS flags.DEFINE_string('logging_context_format_string', '%(asctime)s %(levelname)s %(name)s ' - '[%(request_id)s %(user)s ' - '%(project)s] %(message)s', + '[%(request_id)s %(user_id)s ' + '%(project_id)s] %(message)s', 'format string to use for log messages with context') flags.DEFINE_string('logging_default_format_string', '%(asctime)s %(levelname)s %(name)s [-] ' diff --git a/nova/network/manager.py b/nova/network/manager.py index 75fcb0310..61eada96b 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -421,8 +421,11 @@ class NetworkManager(manager.SchedulerDependentManager): kwargs can contain fixed_ips to circumvent another db lookup """ instance_id = kwargs.pop('instance_id') - fixed_ips = kwargs.get('fixed_ips') or \ + try: + fixed_ips = kwargs.get('fixed_ips') or \ self.db.fixed_ip_get_by_instance(context, instance_id) + except exceptions.FixedIpNotFoundForInstance: + fixed_ips = [] LOG.debug(_("network deallocation for instance |%s|"), instance_id, context=context) # deallocate fixed ips @@ -562,17 +565,19 @@ class NetworkManager(manager.SchedulerDependentManager): # with a network, or a cluster of computes with a network # and use that network here with a method like # network_get_by_compute_host - address = self.db.fixed_ip_associate_pool(context.elevated(), - network['id'], - instance_id) - self._do_trigger_security_group_members_refresh_for_instance( + address = None + if network['cidr']: + address = self.db.fixed_ip_associate_pool(context.elevated(), + network['id'], + instance_id) + self._do_trigger_security_group_members_refresh_for_instance( instance_id) - vif = self.db.virtual_interface_get_by_instance_and_network(context, - instance_id, - network['id']) - values = {'allocated': True, - 'virtual_interface_id': vif['id']} - self.db.fixed_ip_update(context, address, values) + get_vif = self.db.virtual_interface_get_by_instance_and_network + vif = get_vif(context, instance_id, network['id']) + values = {'allocated': True, + 'virtual_interface_id': vif['id']} + self.db.fixed_ip_update(context, address, values) + self._setup_network(context, network) return address @@ -631,34 +636,39 @@ class NetworkManager(manager.SchedulerDependentManager): network_size, cidr_v6, gateway_v6, bridge, bridge_interface, dns1=None, dns2=None, **kwargs): """Create networks based on parameters.""" - fixed_net = netaddr.IPNetwork(cidr) - if FLAGS.use_ipv6: + if cidr_v6: fixed_net_v6 = netaddr.IPNetwork(cidr_v6) significant_bits_v6 = 64 network_size_v6 = 1 << 64 - for index in range(num_networks): - start = index * network_size + if cidr: + fixed_net = netaddr.IPNetwork(cidr) significant_bits = 32 - int(math.log(network_size, 2)) - cidr = '%s/%s' % (fixed_net[start], significant_bits) - project_net = netaddr.IPNetwork(cidr) + + for index in range(num_networks): net = {} net['bridge'] = bridge net['bridge_interface'] = bridge_interface net['dns1'] = dns1 net['dns2'] = dns2 - net['cidr'] = cidr - 
net['multi_host'] = multi_host - net['netmask'] = str(project_net.netmask) - net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast) - net['dhcp_start'] = str(project_net[2]) + + if cidr: + start = index * network_size + project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start], + significant_bits)) + net['cidr'] = str(project_net) + net['multi_host'] = multi_host + net['netmask'] = str(project_net.netmask) + net['gateway'] = str(project_net[1]) + net['broadcast'] = str(project_net.broadcast) + net['dhcp_start'] = str(project_net[2]) + if num_networks > 1: net['label'] = '%s_%d' % (label, index) else: net['label'] = label - if FLAGS.use_ipv6: + if cidr_v6: start_v6 = index * network_size_v6 cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) @@ -691,11 +701,11 @@ class NetworkManager(manager.SchedulerDependentManager): # None if network with cidr or cidr_v6 already exists network = self.db.network_create_safe(context, net) - if network: + if not network: + raise ValueError(_('Network already exists!')) + + if network and cidr: self._create_fixed_ips(context, network['id']) - else: - raise ValueError(_('Network with cidr %s already exists') % - cidr) @property def _bottom_reserved_ips(self): # pylint: disable=R0201 diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 98969fd3e..e18f3e280 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -80,6 +80,10 @@ def notify(publisher_id, event_type, priority, payload): if priority not in log_levels: raise BadPriorityException( _('%s not in valid priorities' % priority)) + + # Ensure everything is JSON serializable. + payload = utils.to_primitive(payload, convert_instances=True) + driver = utils.import_object(FLAGS.notification_driver) msg = dict(message_id=str(uuid.uuid4()), publisher_id=publisher_id, diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py new file mode 100644 index 000000000..bdf7f705b --- /dev/null +++ b/nova/rpc/__init__.py @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from nova.utils import import_object +from nova.rpc.common import RemoteError, LOG +from nova import flags + +FLAGS = flags.FLAGS +flags.DEFINE_string('rpc_backend', + 'nova.rpc.amqp', + "The messaging module to use, defaults to AMQP.") + +RPCIMPL = import_object(FLAGS.rpc_backend) + + +def create_connection(new=True): + return RPCIMPL.Connection.instance(new=True) + + +def create_consumer(conn, topic, proxy, fanout=False): + if fanout: + return RPCIMPL.FanoutAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + else: + return RPCIMPL.TopicAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + + +def create_consumer_set(conn, consumers): + return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) + + +def call(context, topic, msg): + return RPCIMPL.call(context, topic, msg) + + +def cast(context, topic, msg): + return RPCIMPL.cast(context, topic, msg) + + +def fanout_cast(context, topic, msg): + return RPCIMPL.fanout_cast(context, topic, msg) + + +def multicall(context, topic, msg): + return RPCIMPL.multicall(context, topic, msg) diff --git a/nova/rpc.py b/nova/rpc/amqp.py index e2771ca88..61555795a 100644 --- a/nova/rpc.py +++ b/nova/rpc/amqp.py @@ -44,9 +44,7 @@ from nova import fakerabbit from nova import flags from nova import log as logging from nova import utils - - -LOG = logging.getLogger('nova.rpc') +from nova.rpc.common import RemoteError, LOG FLAGS = flags.FLAGS @@ -418,25 +416,6 @@ def msg_reply(msg_id, reply=None, failure=None): publisher.close() -class RemoteError(exception.Error): - """Signifies that a remote class has raised an exception. - - Containes a string representation of the type of the original exception, - the value of the original exception, and the traceback. These are - sent to the parent as a joined string so printing the exception - contains all of the relevent info. - - """ - - def __init__(self, exc_type, value, traceback): - self.exc_type = exc_type - self.value = value - self.traceback = traceback - super(RemoteError, self).__init__('%s %s\n%s' % (exc_type, - value, - traceback)) - - def _unpack_context(msg): """Unpack context from msg.""" context_dict = {} diff --git a/nova/rpc/common.py b/nova/rpc/common.py new file mode 100644 index 000000000..1d3065a83 --- /dev/null +++ b/nova/rpc/common.py @@ -0,0 +1,23 @@ +from nova import exception +from nova import log as logging + +LOG = logging.getLogger('nova.rpc') + + +class RemoteError(exception.Error): + """Signifies that a remote class has raised an exception. + + Containes a string representation of the type of the original exception, + the value of the original exception, and the traceback. These are + sent to the parent as a joined string so printing the exception + contains all of the relevent info. + + """ + + def __init__(self, exc_type, value, traceback): + self.exc_type = exc_type + self.value = value + self.traceback = traceback + super(RemoteError, self).__init__('%s %s\n%s' % (exc_type, + value, + traceback)) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 137b671c0..55cea5f8f 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -17,7 +17,8 @@ Handles all requests relating to schedulers. """ -import novaclient +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions from nova import db from nova import exception @@ -112,7 +113,7 @@ def _wrap_method(function, self): def _process(func, zone): """Worker stub for green thread pool. 
Give the worker an authenticated nova client and zone info.""" - nova = novaclient.OpenStack(zone.username, zone.password, None, + nova = novaclient.Client(zone.username, zone.password, None, zone.api_url) nova.authenticate() return func(nova, zone) @@ -132,10 +133,10 @@ def call_zone_method(context, method_name, errors_to_ignore=None, zones = db.zone_get_all(context) for zone in zones: try: - nova = novaclient.OpenStack(zone.username, zone.password, None, + nova = novaclient.Client(zone.username, zone.password, None, zone.api_url) nova.authenticate() - except novaclient.exceptions.BadRequest, e: + except novaclient_exceptions.BadRequest, e: url = zone.api_url LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s") % locals()) @@ -188,7 +189,7 @@ def _issue_novaclient_command(nova, zone, collection, if method_name in ['find', 'findall']: try: return getattr(manager, method_name)(**kwargs) - except novaclient.NotFound: + except novaclient_exceptions.NotFound: url = zone.api_url LOG.debug(_("%(collection)s.%(method_name)s didn't find " "anything matching '%(kwargs)s' on '%(url)s'" % @@ -200,7 +201,7 @@ def _issue_novaclient_command(nova, zone, collection, item = args.pop(0) try: result = manager.get(item) - except novaclient.NotFound: + except novaclient_exceptions.NotFound: url = zone.api_url LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" % locals())) diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 6f5eb66fd..8c400d476 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -28,6 +28,7 @@ from nova import flags from nova import log as logging from nova.scheduler import zone_aware_scheduler from nova import utils +from nova import exception LOG = logging.getLogger('nova.scheduler.least_cost') diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index c429fdfcc..047dafa6f 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -24,7 +24,9 @@ import operator import json import M2Crypto -import novaclient + +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions from nova import crypto from nova import db @@ -58,12 +60,13 @@ class ZoneAwareScheduler(driver.Scheduler): """Create the requested resource in this Zone.""" host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] + image = request_spec['image'] # TODO(sandy): I guess someone needs to add block_device_mapping # support at some point? Also, OS API has no concept of security # groups. 
instance = compute_api.API().create_db_entry_for_new_instance(context, - base_options, None, []) + image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id @@ -81,7 +84,7 @@ class ZoneAwareScheduler(driver.Scheduler): decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) try: json_entry = decryptor(blob) - return json.dumps(entry) + return json.dumps(json_entry) except M2Crypto.EVP.EVPError: pass return None @@ -117,10 +120,9 @@ class ZoneAwareScheduler(driver.Scheduler): % locals()) nova = None try: - nova = novaclient.OpenStack(zone.username, zone.password, None, - url) + nova = novaclient.Client(zone.username, zone.password, None, url) nova.authenticate() - except novaclient.exceptions.BadRequest, e: + except novaclient_exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " "to talk to zone at %(url)s.") % locals()) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index efdac06e1..97bdf3d44 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -18,10 +18,11 @@ ZoneManager oversees all communications with child Zones. """ import datetime -import novaclient import thread import traceback +from novaclient import v1_1 as novaclient + from eventlet import greenpool from nova import db @@ -89,8 +90,8 @@ class ZoneState(object): def _call_novaclient(zone): """Call novaclient. Broken out for testing purposes.""" - client = novaclient.OpenStack(zone.username, zone.password, None, - zone.api_url) + client = novaclient.Client(zone.username, zone.password, None, + zone.api_url) return client.zones.info()._info diff --git a/nova/service.py b/nova/service.py index 00e4f61e5..6e9eddc5a 100644 --- a/nova/service.py +++ b/nova/service.py @@ -149,26 +149,22 @@ class Service(object): if 'nova-compute' == self.binary: self.manager.update_available_resource(ctxt) - self.conn = rpc.Connection.instance(new=True) + self.conn = rpc.create_connection(new=True) logging.debug("Creating Consumer connection for Service %s" % self.topic) # Share this same connection for these Consumers - consumer_all = rpc.TopicAdapterConsumer( - connection=self.conn, - topic=self.topic, - proxy=self) - consumer_node = rpc.TopicAdapterConsumer( - connection=self.conn, - topic='%s.%s' % (self.topic, self.host), - proxy=self) - fanout = rpc.FanoutAdapterConsumer( - connection=self.conn, - topic=self.topic, - proxy=self) - consumer_set = rpc.ConsumerSet( - connection=self.conn, - consumer_list=[consumer_all, consumer_node, fanout]) + consumer_all = rpc.create_consumer(self.conn, self.topic, self, + fanout=False) + + node_topic = '%s.%s' % (self.topic, self.host) + consumer_node = rpc.create_consumer(self.conn, node_topic, self, + fanout=False) + + fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True) + + consumers = [consumer_all, consumer_node, fanout] + consumer_set = rpc.create_consumer_set(self.conn, consumers) # Wait forever, processing these consumers def _wait(): diff --git a/nova/test.py b/nova/test.py index 9790b0aa1..88f1489e8 100644 --- a/nova/test.py +++ b/nova/test.py @@ -60,11 +60,42 @@ class skip_test(object): self.message = msg def __call__(self, func): + @functools.wraps(func) def _skipper(*args, **kw): """Wrapped skipper function.""" raise nose.SkipTest(self.message) - _skipper.__name__ = func.__name__ - _skipper.__doc__ = func.__doc__ + return _skipper + + +class skip_if(object): + """Decorator that skips a test if contition is true.""" + def __init__(self, 
condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) + return _skipper + + +class skip_unless(object): + """Decorator that skips a test if condition is not true.""" + def __init__(self, condition, msg): + self.condition = condition + self.message = msg + + def __call__(self, func): + @functools.wraps(func) + def _skipper(*args, **kw): + """Wrapped skipper function.""" + if not self.condition: + raise nose.SkipTest(self.message) + func(*args, **kw) return _skipper @@ -99,9 +130,7 @@ class TestCase(unittest.TestCase): self.flag_overrides = {} self.injected = [] self._services = [] - self._monkey_patch_attach() self._original_flags = FLAGS.FlagValuesDict() - rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size) def tearDown(self): """Runs after each test method to tear down test environment.""" @@ -126,9 +155,6 @@ class TestCase(unittest.TestCase): # Reset any overriden flags self.reset_flags() - # Reset our monkey-patches - rpc.Consumer.attach_to_eventlet = self.original_attach - # Stop any timers for x in self.injected: try: @@ -146,11 +172,9 @@ class TestCase(unittest.TestCase): def flags(self, **kw): """Override flag variables for a test.""" for k, v in kw.iteritems(): - if k in self.flag_overrides: - self.reset_flags() - raise Exception( - 'trying to override already overriden flag: %s' % k) - self.flag_overrides[k] = getattr(FLAGS, k) + # Store original flag value if it's not been overriden yet + if k not in self.flag_overrides: + self.flag_overrides[k] = getattr(FLAGS, k) setattr(FLAGS, k, v) def reset_flags(self): @@ -172,17 +196,6 @@ class TestCase(unittest.TestCase): self._services.append(svc) return svc - def _monkey_patch_attach(self): - self.original_attach = rpc.Consumer.attach_to_eventlet - - def _wrapped(inner_self): - rv = self.original_attach(inner_self) - self.injected.append(rv) - return rv - - _wrapped.func_name = self.original_attach.func_name - rpc.Consumer.attach_to_eventlet = _wrapped - # Useful assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. 
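The nova/test.py hunk above swaps the manual __name__/__doc__ copying for functools.wraps (so nose still reports the original test name and docstring) and adds conditional skip_if/skip_unless decorators next to the existing skip_test. A minimal usage sketch, assuming the decorators are used via nova.test as added above; the ExampleSkipTest class and the has_libvirt flag are illustrative only, not part of this patch::

    from nova import test

    has_libvirt = False  # hypothetical capability flag, not part of the patch


    class ExampleSkipTest(test.TestCase):
        """Exercises the three skip decorators defined in nova.test."""

        @test.skip_test("always skipped while this feature is unfinished")
        def test_unconditional_skip(self):
            self.fail("never runs; nose reports the test as skipped")

        @test.skip_if(not has_libvirt, "libvirt is not available")
        def test_skip_if(self):
            pass  # runs only when the condition is false

        @test.skip_unless(has_libvirt, "libvirt is not available")
        def test_skip_unless(self):
            pass  # runs only when the condition is true

Because functools.wraps is applied inside each decorator, skipped tests keep their original names in nose output instead of all appearing as _skipper.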
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index bfb424afe..458434a81 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -22,14 +22,11 @@ import webob.dec from nova import test from nova import context -from nova import flags from nova.api.openstack.limits import RateLimitingMiddleware from nova.api.openstack.common import limited from nova.tests.api.openstack import fakes from webob import Request -FLAGS = flags.FLAGS - @webob.dec.wsgify def simple_wsgi(req): diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index de006d088..ab7ae2e54 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -74,12 +74,8 @@ class FloatingIpTest(test.TestCase): def setUp(self): super(FloatingIpTest, self).setUp() self.controller = FloatingIPController() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) self.stubs.Set(network.api.API, "get_floating_ip", network_api_get_floating_ip) self.stubs.Set(network.api.API, "list_floating_ips", @@ -96,7 +92,6 @@ class FloatingIpTest(test.TestCase): self._create_floating_ip() def tearDown(self): - self.stubs.UnsetAll() self._delete_floating_ip() super(FloatingIpTest, self).tearDown() @@ -111,6 +106,11 @@ class FloatingIpTest(test.TestCase): self.assertEqual(view['floating_ip']['fixed_ip'], None) self.assertEqual(view['floating_ip']['instance_id'], None) + def test_translate_floating_ip_view_dict(self): + floating_ip = {'id': 0, 'address': '10.0.0.10', 'fixed_ip': None} + view = _translate_floating_ip_view(floating_ip) + self.assertTrue('floating_ip' in view) + def test_floating_ips_list(self): req = webob.Request.blank('/v1.1/os-floating-ips') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/contrib/test_multinic_xs.py b/nova/tests/api/openstack/contrib/test_multinic_xs.py index b0a9f7676..ac28f6be6 100644 --- a/nova/tests/api/openstack/contrib/test_multinic_xs.py +++ b/nova/tests/api/openstack/contrib/test_multinic_xs.py @@ -42,22 +42,14 @@ def compute_api_remove_fixed_ip(self, context, instance_id, address): class FixedIpTest(test.TestCase): def setUp(self): super(FixedIpTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) self.stubs.Set(compute.api.API, "add_fixed_ip", compute_api_add_fixed_ip) self.stubs.Set(compute.api.API, "remove_fixed_ip", compute_api_remove_fixed_ip) self.context = context.get_admin_context() - def tearDown(self): - self.stubs.UnsetAll() - super(FixedIpTest, self).tearDown() - def test_add_fixed_ip(self): global last_add_fixed_ip last_add_fixed_ip = (None, None) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 26b1de818..a67a28a4e 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -29,6 +29,7 @@ from glance.common import exception as glance_exc from nova import context from nova import exception as exc from nova import utils +from nova import wsgi import nova.api.openstack.auth from nova.api import openstack from 
nova.api.openstack import auth @@ -40,14 +41,13 @@ import nova.image.fake from nova.image import glance from nova.image import service from nova.tests import fake_flags -from nova.wsgi import Router class Context(object): pass -class FakeRouter(Router): +class FakeRouter(wsgi.Router): def __init__(self): pass @@ -68,21 +68,30 @@ def fake_auth_init(self, application): @webob.dec.wsgify def fake_wsgi(self, req): - req.environ['nova.context'] = context.RequestContext(1, 1) return self.application -def wsgi_app(inner_app10=None, inner_app11=None): +def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True): if not inner_app10: inner_app10 = openstack.APIRouterV10() if not inner_app11: inner_app11 = openstack.APIRouterV11() - mapper = urlmap.URLMap() - api10 = openstack.FaultWrapper(auth.AuthMiddleware( + + if fake_auth: + ctxt = context.RequestContext('fake', 'fake') + api10 = openstack.FaultWrapper(wsgi.InjectContext(ctxt, + limits.RateLimitingMiddleware(inner_app10))) + api11 = openstack.FaultWrapper(wsgi.InjectContext(ctxt, + limits.RateLimitingMiddleware( + extensions.ExtensionMiddleware(inner_app11)))) + else: + api10 = openstack.FaultWrapper(auth.AuthMiddleware( limits.RateLimitingMiddleware(inner_app10))) - api11 = openstack.FaultWrapper(auth.AuthMiddleware( + api11 = openstack.FaultWrapper(auth.AuthMiddleware( limits.RateLimitingMiddleware( extensions.ExtensionMiddleware(inner_app11)))) + Auth = auth + mapper = urlmap.URLMap() mapper['/v1.0'] = api10 mapper['/v1.1'] = api11 mapper['/'] = openstack.FaultWrapper(versions.Versions()) @@ -104,8 +113,7 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def stub_out_image_service(stubs): def fake_get_image_service(image_href): - image_id = int(str(image_href).split('/')[-1]) - return (nova.image.fake.FakeImageService(), image_id) + return (nova.image.fake.FakeImageService(), image_href) stubs.Set(nova.image, 'get_image_service', fake_get_image_service) stubs.Set(nova.image, 'get_default_image_service', lambda: nova.image.fake.FakeImageService()) @@ -359,17 +367,18 @@ class FakeAuthManager(object): if admin is not None: user.admin = admin - def is_admin(self, user): + def is_admin(self, user_id): + user = self.get_user(user_id) return user.admin - def is_project_member(self, user, project): + def is_project_member(self, user_id, project): if not isinstance(project, Project): try: project = self.get_project(project) except exc.NotFound: raise webob.exc.HTTPUnauthorized() - return ((user.id in project.member_ids) or - (user.id == project.project_manager_id)) + return ((user_id in project.member_ids) or + (user_id == project.project_manager_id)) def create_project(self, name, manager_user, description=None, member_users=None): @@ -396,13 +405,13 @@ class FakeAuthManager(object): else: raise exc.NotFound - def get_projects(self, user=None): - if not user: + def get_projects(self, user_id=None): + if not user_id: return FakeAuthManager.projects.values() else: return [p for p in FakeAuthManager.projects.values() - if (user.id in p.member_ids) or - (user.id == p.project_manager_id)] + if (user_id in p.member_ids) or + (user_id == p.project_manager_id)] class FakeRateLimiter(object): diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py index 64abcf48c..707a2599f 100644 --- a/nova/tests/api/openstack/test_accounts.py +++ b/nova/tests/api/openstack/test_accounts.py @@ -16,20 +16,14 @@ import json -import stubout import webob -from nova import flags from nova import test from nova.api.openstack 
import accounts from nova.auth.manager import User from nova.tests.api.openstack import fakes -FLAGS = flags.FLAGS -FLAGS.verbose = True - - def fake_init(self): self.manager = fakes.FakeAuthManager() @@ -41,7 +35,7 @@ def fake_admin_check(self, req): class AccountsTest(test.TestCase): def setUp(self): super(AccountsTest, self).setUp() - self.stubs = stubout.StubOutForTesting() + self.flags(verbose=True, allow_admin_api=True) self.stubs.Set(accounts.Controller, '__init__', fake_init) self.stubs.Set(accounts.Controller, '_check_admin', @@ -52,8 +46,6 @@ class AccountsTest(test.TestCase): fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) - self.allow_admin = FLAGS.allow_admin_api - FLAGS.allow_admin_api = True fakemgr = fakes.FakeAuthManager() joeuser = User('id1', 'guy1', 'acc1', 'secret1', False) superuser = User('id2', 'guy2', 'acc2', 'secret2', True) @@ -62,11 +54,6 @@ class AccountsTest(test.TestCase): fakemgr.create_project('test1', joeuser) fakemgr.create_project('test2', superuser) - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = self.allow_admin - super(AccountsTest, self).tearDown() - def test_get_account(self): req = webob.Request.blank('/v1.0/accounts/test1') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py index e87255b18..c9e66dc4c 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -16,38 +16,22 @@ # under the License. -import stubout import webob -from paste import urlmap -from nova import flags from nova import test -from nova.api import openstack -from nova.api.openstack import auth from nova.tests.api.openstack import fakes -FLAGS = flags.FLAGS - class AdminAPITest(test.TestCase): def setUp(self): super(AdminAPITest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) - self.allow_admin = FLAGS.allow_admin_api - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = self.allow_admin - super(AdminAPITest, self).tearDown() + self.flags(verbose=True) def test_admin_enabled(self): - FLAGS.allow_admin_api = True + self.flags(allow_admin_api=True) # We should still be able to access public operations. req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) @@ -55,7 +39,7 @@ class AdminAPITest(test.TestCase): # TODO: Confirm admin operations are available. def test_admin_disabled(self): - FLAGS.allow_admin_api = False + self.flags(allow_admin_api=False) # We should still be able to access public operations. 
req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index af3478c7d..306ae1aa0 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -17,14 +17,12 @@ import datetime -import stubout import webob import webob.dec import nova.api import nova.api.openstack.auth import nova.auth.manager -from nova import auth from nova import context from nova import db from nova import test @@ -35,7 +33,6 @@ class Test(test.TestCase): def setUp(self): super(Test, self).setUp() - self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext) @@ -45,7 +42,6 @@ class Test(test.TestCase): fakes.stub_out_networking(self.stubs) def tearDown(self): - self.stubs.UnsetAll() fakes.fake_data_store = {} super(Test, self).tearDown() @@ -57,7 +53,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-CDN-Management-Url'], @@ -73,7 +69,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-Server-Management-Url'], @@ -86,7 +82,7 @@ class Test(test.TestCase): self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter) req = webob.Request.blank('/v1.0/fake') req.headers['X-Auth-Token'] = token - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') @@ -110,7 +106,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-Token'] = 'token_hash' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') self.assertEqual(self.destroy_called, True) @@ -124,7 +120,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') token = result.headers['X-Auth-Token'] @@ -132,7 +128,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/fake') req.headers['X-Auth-Token'] = token req.headers['X-Auth-Project-Id'] = 'user2_project' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') @@ -140,7 +136,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'unknown_user' req.headers['X-Auth-Key'] = 
'unknown_user_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_bad_user_good_key(self): @@ -151,18 +147,18 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'unknown_user' req.headers['X-Auth-Key'] = 'user1_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_no_user(self): req = webob.Request.blank('/v1.0/') - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_bad_token(self): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-Token'] = 'unknown_token' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_bad_project(self): @@ -177,7 +173,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') token = result.headers['X-Auth-Token'] @@ -185,7 +181,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/fake') req.headers['X-Auth-Token'] = token req.headers['X-Auth-Project-Id'] = 'user2_project' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_not_existing_project(self): @@ -197,7 +193,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '204 No Content') token = result.headers['X-Auth-Token'] @@ -205,7 +201,7 @@ class Test(test.TestCase): req = webob.Request.blank('/v1.0/fake') req.headers['X-Auth-Token'] = token req.headers['X-Auth-Project-Id'] = 'unknown_project' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') @@ -226,20 +222,19 @@ class TestFunctional(test.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-Token'] = 'test_token_hash' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') def test_token_doesnotexist(self): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-Token'] = 'nonexistant_token_hash' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '401 Unauthorized') class TestLimiter(test.TestCase): def setUp(self): super(TestLimiter, self).setUp() - self.stubs = stubout.StubOutForTesting() self.stubs.Set(nova.api.openstack.auth.AuthMiddleware, '__init__', fakes.fake_auth_init) self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext) @@ -248,7 +243,6 @@ class TestLimiter(test.TestCase): fakes.stub_out_networking(self.stubs) def tearDown(self): - self.stubs.UnsetAll() fakes.fake_data_store = {} 
super(TestLimiter, self).tearDown() @@ -261,7 +255,7 @@ class TestLimiter(test.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'user1' req.headers['X-Auth-Key'] = 'user1_key' - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(len(result.headers['X-Auth-Token']), 40) token = result.headers['X-Auth-Token'] @@ -269,6 +263,6 @@ class TestLimiter(test.TestCase): req = webob.Request.blank('/v1.0/fake') req.method = 'POST' req.headers['X-Auth-Token'] = token - result = req.get_response(fakes.wsgi_app()) + result = req.get_response(fakes.wsgi_app(fake_auth=False)) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 0b76841f0..5a6e43579 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -323,14 +323,10 @@ class MetadataXMLSerializationTest(test.TestCase): expected = minidom.parseString(""" <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="three"> - four - </meta> - <meta key="one"> - two - </meta> + <meta key="three">four</meta> + <meta key="one">two</meta> </metadata> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) @@ -346,11 +342,9 @@ class MetadataXMLSerializationTest(test.TestCase): expected = minidom.parseString(""" <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="None"> - None - </meta> + <meta key="None">None</meta> </metadata> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) @@ -366,11 +360,9 @@ class MetadataXMLSerializationTest(test.TestCase): expected = minidom.parseString(u""" <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="three"> - Jos\xe9 - </meta> + <meta key="three">Jos\xe9</meta> </metadata> - """.encode("UTF-8").replace(" ", "")) + """.encode("UTF-8").replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) @@ -385,10 +377,9 @@ class MetadataXMLSerializationTest(test.TestCase): actual = minidom.parseString(output.replace(" ", "")) expected = minidom.parseString(""" - <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> - two - </meta> - """.replace(" ", "")) + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" + key="one">two</meta> + """.replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) @@ -405,14 +396,10 @@ class MetadataXMLSerializationTest(test.TestCase): expected = minidom.parseString(""" <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="key6"> - value6 - </meta> - <meta key="key4"> - value4 - </meta> + <meta key="key6">value6</meta> + <meta key="key4">value4</meta> </metadata> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) @@ -427,10 +414,9 @@ class MetadataXMLSerializationTest(test.TestCase): actual = minidom.parseString(output.replace(" ", "")) expected = minidom.parseString(""" - <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> - two - </meta> - """.replace(" ", "")) + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" + key="one">two</meta> + """.replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) @@ -448,17 +434,11 @@ class 
MetadataXMLSerializationTest(test.TestCase): expected = minidom.parseString(""" <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="key2"> - value2 - </meta> - <meta key="key9"> - value9 - </meta> - <meta key="key1"> - value1 - </meta> + <meta key="key2">value2</meta> + <meta key="key9">value9</meta> + <meta key="key1">value1</meta> </metadata> - """.replace(" ", "")) + """.replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index d459c694f..49f0ea8b3 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -17,20 +17,18 @@ import json import os.path -import stubout -import unittest import webob -from xml.etree import ElementTree +from lxml import etree from nova import context -from nova import flags +from nova import test from nova.api import openstack from nova.api.openstack import extensions from nova.api.openstack import flavors from nova.api.openstack import wsgi +from nova.api.openstack import xmlutil from nova.tests.api.openstack import fakes -FLAGS = flags.FLAGS NS = "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" response_body = "Try to say this Mr. Knox, sir..." @@ -80,11 +78,12 @@ class StubExtensionManager(object): return request_extensions -class ExtensionControllerTest(unittest.TestCase): +class ExtensionControllerTest(test.TestCase): def setUp(self): - FLAGS.osapi_extensions_path = os.path.join( - os.path.dirname(__file__), "extensions") + super(ExtensionControllerTest, self).setUp() + ext_path = os.path.join(os.path.dirname(__file__), "extensions") + self.flags(osapi_extensions_path=ext_path) def test_list_extensions_json(self): app = openstack.APIRouterV11() @@ -109,8 +108,8 @@ class ExtensionControllerTest(unittest.TestCase): 'updated': '2011-01-22T13:25:27-06:00', 'description': 'The Fox In Socks Extension', 'alias': 'FOXNSOX', - 'links': [] - } + 'links': [], + }, ) def test_get_extension_json(self): @@ -127,8 +126,8 @@ class ExtensionControllerTest(unittest.TestCase): "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension", "alias": "FOXNSOX", - "links": [] - } + "links": [], + }, ) def test_list_extensions_xml(self): @@ -140,7 +139,7 @@ class ExtensionControllerTest(unittest.TestCase): self.assertEqual(200, response.status_int) print response.body - root = ElementTree.XML(response.body) + root = etree.XML(response.body) self.assertEqual(root.tag.split('extensions')[0], NS) # Make sure we have all the extensions. 
@@ -156,6 +155,8 @@ class ExtensionControllerTest(unittest.TestCase): self.assertEqual(fox_ext.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension') + xmlutil.validate_schema(root, 'extensions') + def test_get_extension_xml(self): app = openstack.APIRouterV11() ext_midware = extensions.ExtensionMiddleware(app) @@ -163,9 +164,10 @@ class ExtensionControllerTest(unittest.TestCase): request.accept = "application/xml" response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) - print response.body + xml = response.body + print xml - root = ElementTree.XML(response.body) + root = etree.XML(xml) self.assertEqual(root.tag.split('extension')[0], NS) self.assertEqual(root.get('alias'), 'FOXNSOX') self.assertEqual(root.get('name'), 'Fox In Socks') @@ -175,8 +177,15 @@ class ExtensionControllerTest(unittest.TestCase): self.assertEqual(root.findtext('{0}description'.format(NS)), 'The Fox In Socks Extension') + xmlutil.validate_schema(root, 'extension') + -class ResourceExtensionTest(unittest.TestCase): +class ResourceExtensionTest(test.TestCase): + + def setUp(self): + super(ResourceExtensionTest, self).setUp() + ext_path = os.path.join(os.path.dirname(__file__), "extensions") + self.flags(osapi_extensions_path=ext_path) def test_no_extension_present(self): manager = StubExtensionManager(None) @@ -214,13 +223,14 @@ class InvalidExtension(object): return "THIRD" -class ExtensionManagerTest(unittest.TestCase): +class ExtensionManagerTest(test.TestCase): response_body = "Try to say this Mr. Knox, sir..." def setUp(self): - FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__), - "extensions") + super(ExtensionManagerTest, self).setUp() + ext_path = os.path.join(os.path.dirname(__file__), "extensions") + self.flags(osapi_extensions_path=ext_path) def test_get_resources(self): app = openstack.APIRouterV11() @@ -239,11 +249,12 @@ class ExtensionManagerTest(unittest.TestCase): self.assertTrue('THIRD' not in ext_mgr.extensions) -class ActionExtensionTest(unittest.TestCase): +class ActionExtensionTest(test.TestCase): def setUp(self): - FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__), - "extensions") + super(ActionExtensionTest, self).setUp() + ext_path = os.path.join(os.path.dirname(__file__), "extensions") + self.flags(osapi_extensions_path=ext_path) def _send_server_action_request(self, url, body): app = openstack.APIRouterV11() @@ -277,19 +288,12 @@ class ActionExtensionTest(unittest.TestCase): self.assertEqual(404, response.status_int) -class RequestExtensionTest(unittest.TestCase): +class RequestExtensionTest(test.TestCase): def setUp(self): super(RequestExtensionTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) - self.context = context.get_admin_context() - - def tearDown(self): - self.stubs.UnsetAll() - super(RequestExtensionTest, self).tearDown() + ext_path = os.path.join(os.path.dirname(__file__), "extensions") + self.flags(osapi_extensions_path=ext_path) def test_get_resources_with_stub_mgr(self): @@ -327,7 +331,7 @@ class RequestExtensionTest(unittest.TestCase): self.assertEqual("Pig Bands!", response_data['big_bands']) -class ExtensionsXMLSerializerTest(unittest.TestCase): +class ExtensionsXMLSerializerTest(test.TestCase): def test_serialize_extenstion(self): serializer = extensions.ExtensionsXMLSerializer() @@ -342,19 +346,20 @@ class ExtensionsXMLSerializerTest(unittest.TestCase): { 'rel': 
'describedby', 'type': 'application/pdf', - 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf' + 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf', }, { 'rel': 'describedby', 'type': 'application/vnd.sun.wadl+xml', - 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl' - } - ] - } + 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl', + }, + ], + }, } xml = serializer.serialize(data, 'show') - root = ElementTree.XML(xml) + print xml + root = etree.XML(xml) ext_dict = data['extension'] self.assertEqual(root.findtext('{0}description'.format(NS)), ext_dict['description']) @@ -368,6 +373,8 @@ class ExtensionsXMLSerializerTest(unittest.TestCase): for key, value in link.items(): self.assertEqual(link_nodes[i].get(key), value) + xmlutil.validate_schema(root, 'extension') + def test_serialize_extensions(self): serializer = extensions.ExtensionsXMLSerializer() data = { @@ -382,14 +389,14 @@ class ExtensionsXMLSerializerTest(unittest.TestCase): { "rel": "describedby", "type": "application/pdf", - "href": "http://foo.com/api/ext/cs-pie.pdf" + "href": "http://foo.com/api/ext/cs-pie.pdf", }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", - "href": "http://foo.com/api/ext/cs-pie.wadl" - } - ] + "href": "http://foo.com/api/ext/cs-pie.wadl", + }, + ], }, { "name": "Cloud Block Storage", @@ -401,21 +408,21 @@ class ExtensionsXMLSerializerTest(unittest.TestCase): { "rel": "describedby", "type": "application/pdf", - "href": "http://foo.com/api/ext/cs-cbs.pdf" + "href": "http://foo.com/api/ext/cs-cbs.pdf", }, { "rel": "describedby", "type": "application/vnd.sun.wadl+xml", - "href": "http://foo.com/api/ext/cs-cbs.wadl" - } - ] - } - ] + "href": "http://foo.com/api/ext/cs-cbs.wadl", + }, + ], + }, + ], } xml = serializer.serialize(data, 'index') print xml - root = ElementTree.XML(xml) + root = etree.XML(xml) ext_elems = root.findall('{0}extension'.format(NS)) self.assertEqual(len(ext_elems), 2) for i, ext_elem in enumerate(ext_elems): @@ -431,3 +438,5 @@ class ExtensionsXMLSerializerTest(unittest.TestCase): for i, link in enumerate(ext_dict['links']): for key, value in link.items(): self.assertEqual(link_nodes[i].get(key), value) + + xmlutil.validate_schema(root, 'extensions') diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 4ac35b26b..d0fe72001 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -16,7 +16,6 @@ # under the License. 
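The extensions serializer tests above interrogate the parsed tree by Clark-notation names ('{namespace}tag'), which is why the NS constant is prepended to every findall()/findtext() call. A self-contained sketch of that lookup style with lxml (the sample document below is made up, it is not the serializer's real output, and it skips the xmlutil.validate_schema() step, which needs the schema files shipped in the tree)::

    from lxml import etree

    NS = "{http://docs.openstack.org/compute/api/v1.1}"

    xml = """<extensions xmlns="http://docs.openstack.org/compute/api/v1.1">
      <extension alias="FOXNSOX" name="Fox In Socks">
        <description>The Fox In Socks Extension</description>
      </extension>
    </extensions>"""

    root = etree.XML(xml)

    # Child elements inherit the default namespace, so lookups must be qualified.
    ext_elems = root.findall('{0}extension'.format(NS))
    assert len(ext_elems) == 1
    assert ext_elems[0].get('alias') == 'FOXNSOX'

    # findtext() takes the same Clark-notation path for nested elements.
    description = ext_elems[0].findtext('{0}description'.format(NS))
    assert description == 'The Fox In Socks Extension'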
import json -import stubout import webob import xml.dom.minidom as minidom @@ -56,12 +55,8 @@ def return_instance_type_not_found(context, flavor_id): class FlavorsTest(test.TestCase): def setUp(self): super(FlavorsTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) self.stubs.Set(nova.db.api, "instance_type_get_all", return_instance_types) self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id", diff --git a/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py b/nova/tests/api/openstack/test_flavors_extra_specs.py index 2c1c335b0..ccd1b0d9f 100644 --- a/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py +++ b/nova/tests/api/openstack/test_flavors_extra_specs.py @@ -17,20 +17,16 @@ import json import stubout -import unittest import webob import os.path -from nova import flags +from nova import test from nova.api import openstack -from nova.api.openstack import auth from nova.api.openstack import extensions from nova.tests.api.openstack import fakes import nova.wsgi -FLAGS = flags.FLAGS - def return_create_flavor_extra_specs(context, flavor_id, extra_specs): return stub_flavor_extra_specs() @@ -40,10 +36,6 @@ def return_flavor_extra_specs(context, flavor_id): return stub_flavor_extra_specs() -def return_flavor_extra_specs(context, flavor_id): - return stub_flavor_extra_specs() - - def return_empty_flavor_extra_specs(context, flavor_id): return {} @@ -62,30 +54,17 @@ def stub_flavor_extra_specs(): return specs -class FlavorsExtraSpecsTest(unittest.TestCase): +class FlavorsExtraSpecsTest(test.TestCase): def setUp(self): super(FlavorsExtraSpecsTest, self).setUp() - FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__), - "extensions") - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) - self.mware = auth.AuthMiddleware( - extensions.ExtensionMiddleware( - openstack.APIRouterV11())) - - def tearDown(self): - self.stubs.UnsetAll() - super(FlavorsExtraSpecsTest, self).tearDown() def test_index(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', return_flavor_extra_specs) - request = webob.Request.blank('/flavors/1/os-extra_specs') - res = request.get_response(self.mware) + request = webob.Request.blank('/v1.1/flavors/1/os-extra_specs') + res = request.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) res_dict = json.loads(res.body) self.assertEqual('application/json', res.headers['Content-Type']) @@ -94,8 +73,8 @@ class FlavorsExtraSpecsTest(unittest.TestCase): def test_index_no_data(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', return_empty_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs') - res = req.get_response(self.mware) + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs') + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual('application/json', res.headers['Content-Type']) @@ -104,8 +83,8 @@ class FlavorsExtraSpecsTest(unittest.TestCase): def test_show(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', return_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs/key5') - res = req.get_response(self.mware) + 
req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key5') + res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) res_dict = json.loads(res.body) self.assertEqual('application/json', res.headers['Content-Type']) @@ -114,28 +93,28 @@ class FlavorsExtraSpecsTest(unittest.TestCase): def test_show_spec_not_found(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', return_empty_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs/key6') - res = req.get_response(self.mware) + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key6') + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(404, res.status_int) def test_delete(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete', delete_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs/key5') + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key5') req.method = 'DELETE' - res = req.get_response(self.mware) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) def test_create(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_update_or_create', return_create_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs') + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs') req.method = 'POST' req.body = '{"extra_specs": {"key1": "value1"}}' req.headers["content-type"] = "application/json" - res = req.get_response(self.mware) + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) self.assertEqual('application/json', res.headers['Content-Type']) @@ -145,21 +124,21 @@ class FlavorsExtraSpecsTest(unittest.TestCase): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_update_or_create', return_create_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs') + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs') req.method = 'POST' req.headers["content-type"] = "application/json" - res = req.get_response(self.mware) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_update_item(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_update_or_create', return_create_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs/key1') + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key1') req.method = 'PUT' req.body = '{"key1": "value1"}' req.headers["content-type"] = "application/json" - res = req.get_response(self.mware) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) self.assertEqual('application/json', res.headers['Content-Type']) res_dict = json.loads(res.body) @@ -169,30 +148,30 @@ class FlavorsExtraSpecsTest(unittest.TestCase): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_update_or_create', return_create_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs/key1') + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" - res = req.get_response(self.mware) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_update_item_too_many_keys(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_update_or_create', return_create_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs/key1') + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key1') req.method = 'PUT' req.body 
= '{"key1": "value1", "key2": "value2"}' req.headers["content-type"] = "application/json" - res = req.get_response(self.mware) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) def test_update_item_body_uri_mismatch(self): self.stubs.Set(nova.db.api, 'instance_type_extra_specs_update_or_create', return_create_flavor_extra_specs) - req = webob.Request.blank('/flavors/1/os-extra_specs/bad') + req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/bad') req.method = 'PUT' req.body = '{"key1": "value1"}' req.headers["content-type"] = "application/json" - res = req.get_response(self.mware) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index c9137cc24..56a0932e7 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -16,8 +16,6 @@ # under the License. import json -import stubout -import unittest import webob @@ -84,23 +82,13 @@ class ImageMetaDataTest(test.TestCase): def setUp(self): super(ImageMetaDataTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - self.orig_image_service = FLAGS.image_service - FLAGS.image_service = 'nova.image.glance.GlanceImageService' - fakes.FakeAuthManager.auth_data = {} - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) + self.flags(image_service='nova.image.glance.GlanceImageService') # NOTE(dprince) max out properties/metadata in image 3 for testing img3 = self.IMAGE_FIXTURES[2] for num in range(FLAGS.quota_metadata_items): img3['properties']['key%i' % num] = "blah" fakes.stub_out_glance(self.stubs, self.IMAGE_FIXTURES) - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.image_service = self.orig_image_service - super(ImageMetaDataTest, self).tearDown() - def test_index(self): req = webob.Request.blank('/v1.1/images/1/metadata') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 87a695dde..383ed2e03 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -34,7 +34,6 @@ import webob from glance import client as glance_client from nova import context from nova import exception -from nova import flags from nova import test from nova import utils import nova.api.openstack @@ -42,9 +41,6 @@ from nova.api.openstack import images from nova.tests.api.openstack import fakes -FLAGS = flags.FLAGS - - class _BaseImageServiceTests(test.TestCase): """Tasks to test for all image services""" @@ -155,7 +151,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): fakes.stub_out_compute_api_snapshot(self.stubs) service_class = 'nova.image.glance.GlanceImageService' self.service = utils.import_object(service_class) - self.context = context.RequestContext(1, None) + self.context = context.RequestContext('fake', 'fake') self.service.delete_all() self.sent_to_glance = {} fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance) @@ -168,7 +164,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): """Ensure instance_id is persisted as an image-property""" fixture = {'name': 'test image', 'is_public': False, - 'properties': {'instance_id': '42', 'user_id': '1'}} + 'properties': {'instance_id': '42', 'user_id': 'fake'}} image_id = self.service.create(self.context, fixture)['id'] expected = fixture @@ -178,7 +174,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests): 
expected = {'id': image_id, 'name': 'test image', 'is_public': False, - 'properties': {'instance_id': '42', 'user_id': '1'}} + 'properties': {'instance_id': '42', 'user_id': 'fake'}} self.assertDictMatch(image_meta, expected) image_metas = self.service.detail(self.context) @@ -328,14 +324,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def setUp(self): """Run before each test.""" super(ImageControllerWithGlanceServiceTest, self).setUp() - self.orig_image_service = FLAGS.image_service - FLAGS.image_service = 'nova.image.glance.GlanceImageService' + self.flags(image_service='nova.image.glance.GlanceImageService') self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) self.fixtures = self._make_image_fixtures() fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures) @@ -345,14 +337,13 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def tearDown(self): """Run after each test.""" self.stubs.UnsetAll() - FLAGS.image_service = self.orig_image_service super(ImageControllerWithGlanceServiceTest, self).tearDown() def _applicable_fixture(self, fixture, user_id): """Determine if this fixture is applicable for given user id.""" is_public = fixture["is_public"] try: - uid = int(fixture["properties"]["user_id"]) + uid = fixture["properties"]["user_id"] except KeyError: uid = None return uid == user_id or is_public @@ -388,6 +379,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, "status": "ACTIVE", + "progress": 100, }, } @@ -411,6 +403,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, "status": "QUEUED", + "progress": 0, 'server': { 'id': 42, "links": [{ @@ -424,7 +417,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, "metadata": { "instance_ref": "http://localhost/v1.1/servers/42", - "user_id": "1", + "user_id": "fake", }, "links": [{ "rel": "self", @@ -453,6 +446,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): updated="%(expected_now)s" created="%(expected_now)s" status="ACTIVE" + progress="100" xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" /> """ % (locals())) @@ -472,6 +466,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): updated="%(expected_now)s" created="%(expected_now)s" status="ACTIVE" + progress="100" xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" /> """ % (locals())) @@ -559,7 +554,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): fixtures = copy.copy(self.fixtures) for image in fixtures: - if not self._applicable_fixture(image, 1): + if not self._applicable_fixture(image, "fake"): fixtures.remove(image) continue @@ -596,6 +591,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, }, { 'id': 124, @@ -603,6 +599,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', + 'progress': 0, }, { 'id': 125, @@ -617,7 +614,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'active snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, - 'status': 'ACTIVE' + 'status': 
'ACTIVE', + 'progress': 100, }, { 'id': 127, @@ -625,6 +623,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', + 'progress': 0, }, { 'id': 129, @@ -632,6 +631,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, }] self.assertDictListMatch(expected, response_list) @@ -652,6 +652,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/123", @@ -666,11 +667,12 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'queued snapshot', 'metadata': { u'instance_ref': u'http://localhost/v1.1/servers/42', - u'user_id': u'1', + u'user_id': u'fake', }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', + 'progress': 0, 'server': { 'id': 42, "links": [{ @@ -696,7 +698,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'saving snapshot', 'metadata': { u'instance_ref': u'http://localhost/v1.1/servers/42', - u'user_id': u'1', + u'user_id': u'fake', }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, @@ -727,11 +729,12 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'active snapshot', 'metadata': { u'instance_ref': u'http://localhost/v1.1/servers/42', - u'user_id': u'1', + u'user_id': u'fake', }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, 'server': { 'id': 42, "links": [{ @@ -757,11 +760,12 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'name': 'killed snapshot', 'metadata': { u'instance_ref': u'http://localhost/v1.1/servers/42', - u'user_id': u'1', + u'user_id': u'fake', }, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', + 'progress': 0, 'server': { 'id': 42, "links": [{ @@ -789,6 +793,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100, "links": [{ "rel": "self", "href": "http://localhost/v1.1/images/129", @@ -1010,7 +1015,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_meta = json.loads(res.body)['image'] expected = {'id': 123, 'name': 'public image', 'updated': self.NOW_API_FORMAT, - 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE'} + 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', + 'progress': 100} self.assertDictMatch(image_meta, expected) def test_get_image_non_existent(self): @@ -1034,6 +1040,9 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): req.headers["content-type"] = "application/json" response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) + image_meta = json.loads(response.body)['image'] + self.assertEqual(123, image_meta['serverId']) + self.assertEqual('Snapshot 1', image_meta['name']) def test_create_snapshot_no_name(self): """Name is required for snapshots""" @@ -1045,82 +1054,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(400, response.status_int) - def test_create_backup_no_name(self): - """Name is also required for backups""" - body = dict(image=dict(serverId='123', image_type='backup', - backup_type='daily', rotation=1)) - req 
= webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_backup_with_rotation_and_backup_type(self): - """The happy path for creating backups - - Creating a backup is an admin-only operation, as opposed to snapshots - which are available to anybody. - """ - # FIXME(sirp): teardown needed? - FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', image_type='backup', - name='Backup 1', - backup_type='daily', rotation=1)) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - - def test_create_backup_no_rotation(self): - """Rotation is required for backup requests""" - # FIXME(sirp): teardown needed? - FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', name='daily', - image_type='backup', backup_type='daily')) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_backup_no_backup_type(self): - """Backup Type (daily or weekly) is required for backup requests""" - # FIXME(sirp): teardown needed? - FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', name='daily', - image_type='backup', rotation=1)) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_with_invalid_image_type(self): - """Valid image_types are snapshot | daily | weekly""" - # FIXME(sirp): teardown needed? 
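Whether they are being removed here or re-added under the server actions resource later in this patch, these image and backup tests all share one functional-test shape: build a request with webob.Request.blank(), run it through a WSGI app, and assert on the response. A minimal, self-contained version of that shape (the tiny echo app below is an invented stand-in for the application under test; in the real tests fakes.wsgi_app() sits in that position)::

    import json

    import webob


    def tiny_wsgi_app(environ, start_response):
        """Toy WSGI app that echoes a POSTed JSON body back to the caller."""
        length = int(environ.get('CONTENT_LENGTH') or 0)
        payload = environ['wsgi.input'].read(length)
        start_response('200 OK', [('Content-Type', 'application/json')])
        return [payload]


    req = webob.Request.blank('/v1.0/images')
    req.method = 'POST'
    body = {'image': {'serverId': '123', 'name': 'Snapshot 1'}}
    req.body = json.dumps(body).encode('utf-8')
    req.headers['Content-Type'] = 'application/json'

    res = req.get_response(tiny_wsgi_app)

    assert res.status_int == 200
    assert json.loads(res.body.decode('utf-8'))['image']['name'] == 'Snapshot 1'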
- FLAGS.allow_admin_api = True - - # FIXME(sirp): should the fact that backups are admin_only be a FLAG - body = dict(image=dict(serverId='123', image_type='monthly', - rotation=1)) - req = webob.Request.blank('/v1.0/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - def test_create_image_no_server_id(self): body = dict(image=dict(name='Snapshot 1')) @@ -1131,107 +1064,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(400, response.status_int) - def test_create_image_v1_1(self): - - body = dict(image=dict(serverRef='123', name='Snapshot 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - - def test_create_image_v1_1_actual_server_ref(self): - - serverRef = 'http://localhost/v1.1/servers/1' - serverBookmark = 'http://localhost/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - result = json.loads(response.body) - expected = { - 'id': 1, - 'links': [ - { - 'rel': 'self', - 'href': serverRef, - }, - { - 'rel': 'bookmark', - 'href': serverBookmark, - }, - ] - } - self.assertEqual(result['image']['server'], expected) - - def test_create_image_v1_1_actual_server_ref_port(self): - - serverRef = 'http://localhost:8774/v1.1/servers/1' - serverBookmark = 'http://localhost:8774/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - result = json.loads(response.body) - expected = { - 'id': 1, - 'links': [ - { - 'rel': 'self', - 'href': serverRef, - }, - { - 'rel': 'bookmark', - 'href': serverBookmark, - }, - ] - } - self.assertEqual(result['image']['server'], expected) - - def test_create_image_v1_1_server_ref_bad_hostname(self): - - serverRef = 'http://asdf/v1.1/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_v1_1_no_server_ref(self): - - body = dict(image=dict(name='Snapshot 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_v1_1_server_ref_missing_version(self): - - serverRef = 'http://localhost/servers/1' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - response = 
req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) - - def test_create_image_v1_1_server_ref_missing_id(self): - - serverRef = 'http://localhost/v1.1/servers' - body = dict(image=dict(serverRef=serverRef, name='Backup 1')) - req = webob.Request.blank('/v1.1/images') + def test_create_image_snapshots_disabled(self): + self.flags(allow_instance_snapshots=False) + body = dict(image=dict(serverId='123', name='Snapshot 1')) + req = webob.Request.blank('/v1.0/images') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -1259,7 +1095,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): # Snapshot for User 1 server_ref = 'http://localhost/v1.1/servers/42' - snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'} + snapshot_properties = {'instance_ref': server_ref, 'user_id': 'fake'} for status in ('queued', 'saving', 'active', 'killed'): add_fixture(id=image_id, name='%s snapshot' % status, is_public=False, status=status, @@ -1267,7 +1103,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_id += 1 # Snapshot for User 2 - other_snapshot_properties = {'instance_id': '43', 'user_id': '2'} + other_snapshot_properties = {'instance_id': '43', 'user_id': 'other'} add_fixture(id=image_id, name='someone elses snapshot', is_public=False, status='active', properties=other_snapshot_properties) @@ -1716,76 +1552,3 @@ class ImageXMLSerializationTest(test.TestCase): """.replace(" ", "") % (locals())) self.assertEqual(expected.toxml(), actual.toxml()) - - def test_create(self): - serializer = images.ImageXMLSerializer() - - fixture = { - 'image': { - 'id': 1, - 'name': 'Image1', - 'created': self.TIMESTAMP, - 'updated': self.TIMESTAMP, - 'status': 'SAVING', - 'progress': 80, - 'server': { - 'id': 1, - 'links': [ - { - 'href': self.SERVER_HREF, - 'rel': 'self', - }, - { - 'href': self.SERVER_BOOKMARK, - 'rel': 'bookmark', - }, - ], - }, - 'metadata': { - 'key1': 'value1', - }, - 'links': [ - { - 'href': self.IMAGE_HREF % 1, - 'rel': 'self', - }, - { - 'href': self.IMAGE_BOOKMARK % 1, - 'rel': 'bookmark', - }, - ], - }, - } - - output = serializer.serialize(fixture, 'create') - actual = minidom.parseString(output.replace(" ", "")) - - expected_server_href = self.SERVER_HREF - expected_server_bookmark = self.SERVER_BOOKMARK - expected_href = self.IMAGE_HREF % 1 - expected_bookmark = self.IMAGE_BOOKMARK % 1 - expected_now = self.TIMESTAMP - expected = minidom.parseString(""" - <image id="1" - xmlns="http://docs.openstack.org/compute/api/v1.1" - xmlns:atom="http://www.w3.org/2005/Atom" - name="Image1" - updated="%(expected_now)s" - created="%(expected_now)s" - status="SAVING" - progress="80"> - <server id="1"> - <atom:link rel="self" href="%(expected_server_href)s"/> - <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/> - </server> - <metadata> - <meta key="key1"> - value1 - </meta> - </metadata> - <atom:link href="%(expected_href)s" rel="self"/> - <atom:link href="%(expected_bookmark)s" rel="bookmark"/> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 8a3fe681a..6c3d531e3 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -920,7 +920,7 @@ class LimitsViewBuilderV11Test(test.TestCase): "verb": "POST", "remaining": 2, "unit": "MINUTE", - "resetTime": 1311272226 + "resetTime": 1311272226, }, 
{ "URI": "*/servers", @@ -929,7 +929,7 @@ class LimitsViewBuilderV11Test(test.TestCase): "verb": "POST", "remaining": 10, "unit": "DAY", - "resetTime": 1311272226 + "resetTime": 1311272226, }, ] self.absolute_limits = { @@ -954,7 +954,7 @@ class LimitsViewBuilderV11Test(test.TestCase): "verb": "POST", "remaining": 2, "unit": "MINUTE", - "next-available": "2011-07-21T18:17:06Z" + "next-available": "2011-07-21T18:17:06Z", }, ] }, @@ -967,7 +967,7 @@ class LimitsViewBuilderV11Test(test.TestCase): "verb": "POST", "remaining": 10, "unit": "DAY", - "next-available": "2011-07-21T18:17:06Z" + "next-available": "2011-07-21T18:17:06Z", }, ] }, @@ -989,7 +989,7 @@ class LimitsViewBuilderV11Test(test.TestCase): expected_limits = { "limits": { "rate": [], - "absolute": {} + "absolute": {}, } } @@ -1022,7 +1022,7 @@ class LimitsXMLSerializationTest(test.TestCase): "verb": "POST", "remaining": 2, "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z" + "next-available": "2011-12-15T22:42:45Z", }, ] }, @@ -1083,7 +1083,7 @@ class LimitsXMLSerializationTest(test.TestCase): fixture = { "limits": { "rate": [], - "absolute": {} + "absolute": {}, } } diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py new file mode 100644 index 000000000..bf18bc1b0 --- /dev/null +++ b/nova/tests/api/openstack/test_server_actions.py @@ -0,0 +1,1037 @@ +import base64 +import json +import unittest +from xml.dom import minidom + +import stubout +import webob + +from nova import context +from nova import db +from nova import utils +from nova.api.openstack import create_instance_helper +from nova.compute import instance_types +from nova.compute import power_state +import nova.db.api +from nova import test +from nova.tests.api.openstack import common +from nova.tests.api.openstack import fakes + + +def return_server_by_id(context, id): + return _get_instance() + + +def instance_update(context, instance_id, kwargs): + return _get_instance() + + +def return_server_with_power_state(power_state): + def _return_server(context, id): + instance = _get_instance() + instance['state'] = power_state + return instance + return _return_server + + +def return_server_with_uuid_and_power_state(power_state): + def _return_server(context, id): + return return_server_with_power_state(power_state) + return _return_server + + +class MockSetAdminPassword(object): + def __init__(self): + self.instance_id = None + self.password = None + + def __call__(self, context, instance_id, password): + self.instance_id = instance_id + self.password = password + + +def _get_instance(): + instance = { + "id": 1, + "created_at": "2010-10-10 12:00:00", + "updated_at": "2010-11-11 11:00:00", + "admin_pass": "", + "user_id": "", + "project_id": "", + "image_ref": "5", + "kernel_id": "", + "ramdisk_id": "", + "launch_index": 0, + "key_name": "", + "key_data": "", + "state": 0, + "state_description": "", + "memory_mb": 0, + "vcpus": 0, + "local_gb": 0, + "hostname": "", + "host": "", + "instance_type": { + "flavorid": 1, + }, + "user_data": "", + "reservation_id": "", + "mac_address": "", + "scheduled_at": utils.utcnow(), + "launched_at": utils.utcnow(), + "terminated_at": utils.utcnow(), + "availability_zone": "", + "display_name": "test_server", + "display_description": "", + "locked": False, + "metadata": [], + #"address": , + #"floating_ips": [{"address":ip} for ip in public_addresses]} + "uuid": "deadbeef-feed-edee-beef-d0ea7beefedd"} + + return instance + + +class ServerActionsTest(test.TestCase): + + def 
setUp(self): + self.maxDiff = None + super(ServerActionsTest, self).setUp() + self.flags(verbose=True) + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_auth(self.stubs) + self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) + self.stubs.Set(nova.db.api, 'instance_update', instance_update) + + self.webreq = common.webob_factory('/v1.0/servers') + + def tearDown(self): + self.stubs.UnsetAll() + + def test_server_change_password(self): + body = {'changePassword': {'adminPass': '1234pass'}} + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 501) + + def test_server_change_password_xml(self): + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/xml' + req.body = '<changePassword adminPass="1234pass">' +# res = req.get_response(fakes.wsgi_app()) +# self.assertEqual(res.status_int, 501) + + def test_server_reboot(self): + body = dict(server=dict( + name='server_test', imageId=2, flavorId=2, metadata={}, + personality={})) + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + + def test_server_rebuild_accepted(self): + body = { + "rebuild": { + "imageId": 2, + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(res.body, "") + + def test_server_rebuild_rejected_when_building(self): + body = { + "rebuild": { + "imageId": 2, + }, + } + + state = power_state.BUILDING + new_return_server = return_server_with_power_state(state) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + self.stubs.Set(nova.db, 'instance_get_by_uuid', + return_server_with_uuid_and_power_state(state)) + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 409) + + def test_server_rebuild_bad_entity(self): + body = { + "rebuild": { + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_resize_bad_flavor_fails(self): + req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertEqual(self.resize_called, False) + + def test_resize_raises_fails(self): + req = 
self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) + + def resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 500) + + def test_resized_server_has_correct_status(self): + req = self.webreq('/1', 'GET') + + def fake_migration_get(*args): + return {} + + self.stubs.Set(nova.db, 'migration_get_by_instance_and_status', + fake_migration_get) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + body = json.loads(res.body) + self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM') + + def test_confirm_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + + self.resize_called = False + + def confirm_resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'confirm_resize', + confirm_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) + self.assertEqual(self.resize_called, True) + + def test_confirm_resize_server_fails(self): + req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) + + def confirm_resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'confirm_resize', + confirm_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_revert_resize_server(self): + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) + + self.resize_called = False + + def revert_resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_revert_resize_server_fails(self): + req = self.webreq('/1/action', 'POST', dict(revertResize=None)) + + def revert_resize_mock(*args): + raise Exception('hurr durr') + + self.stubs.Set(nova.compute.api.API, 'revert_resize', + revert_resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_migrate_server(self): + """This is basically the same as resize, only we provide the `migrate` + attribute in the body's dict. 
+ """ + req = self.webreq('/1/migrate', 'POST') + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_create_backup(self): + """The happy path for creating backups""" + self.flags(allow_admin_api=True) + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + self.assertTrue(response.headers['Location']) + + def test_create_backup_admin_api_off(self): + """The happy path for creating backups""" + self.flags(allow_admin_api=False) + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(501, response.status_int) + + def test_create_backup_with_metadata(self): + self.flags(allow_admin_api=True) + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + 'metadata': {'123': 'asdf'}, + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + self.assertTrue(response.headers['Location']) + + def test_create_backup_no_name(self): + """Name is required for backups""" + self.flags(allow_admin_api=True) + + body = { + 'createBackup': { + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_rotation(self): + """Rotation is required for backup requests""" + self.flags(allow_admin_api=True) + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + }, + } + + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_backup_type(self): + """Backup Type (daily or weekly) is required for backup requests""" + self.flags(allow_admin_api=True) + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'rotation': 1, + }, + } + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_bad_entity(self): + self.flags(allow_admin_api=True) + + body = {'createBackup': 'go'} + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + response = req.get_response(fakes.wsgi_app()) + 
self.assertEqual(400, response.status_int) + + +class ServerActionsTestV11(test.TestCase): + + def setUp(self): + self.maxDiff = None + super(ServerActionsTestV11, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_auth(self.stubs) + self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) + self.stubs.Set(nova.db.api, 'instance_update', instance_update) + + fakes.stub_out_glance(self.stubs) + fakes.stub_out_compute_api_snapshot(self.stubs) + service_class = 'nova.image.glance.GlanceImageService' + self.service = utils.import_object(service_class) + self.context = context.RequestContext(1, None) + self.service.delete_all() + self.sent_to_glance = {} + fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance) + self.flags(allow_instance_snapshots=True) + + def tearDown(self): + self.stubs.UnsetAll() + + def test_server_change_password(self): + mock_method = MockSetAdminPassword() + self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) + body = {'changePassword': {'adminPass': '1234pass'}} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(mock_method.instance_id, '1') + self.assertEqual(mock_method.password, '1234pass') + + def test_server_change_password_xml(self): + mock_method = MockSetAdminPassword() + self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = "application/xml" + req.body = """<?xml version="1.0" encoding="UTF-8"?> + <changePassword + xmlns="http://docs.openstack.org/compute/api/v1.1" + adminPass="1234pass"/>""" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(mock_method.instance_id, '1') + self.assertEqual(mock_method.password, '1234pass') + + def test_server_change_password_not_a_string(self): + body = {'changePassword': {'adminPass': 1234}} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_change_password_bad_request(self): + body = {'changePassword': {'pass': '12345'}} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_change_password_empty_string(self): + body = {'changePassword': {'adminPass': ''}} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_change_password_none(self): + body = {'changePassword': {'adminPass': None}} + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_reboot_hard(self): + body = dict(reboot=dict(type="HARD")) + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 
'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_server_reboot_soft(self): + body = dict(reboot=dict(type="SOFT")) + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_server_reboot_incorrect_type(self): + body = dict(reboot=dict(type="NOT_A_TYPE")) + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_reboot_missing_type(self): + body = dict(reboot=dict()) + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_accepted_minimum(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_server_rebuild_rejected_when_building(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + }, + } + + state = power_state.BUILDING + new_return_server = return_server_with_power_state(state) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + self.stubs.Set(nova.db, 'instance_get_by_uuid', + return_server_with_uuid_and_power_state(state)) + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 409) + + def test_server_rebuild_accepted_with_metadata(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "metadata": { + "new": "metadata", + }, + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_server_rebuild_accepted_with_bad_metadata(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "metadata": "stack", + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_bad_entity(self): + body = { + "rebuild": { + "imageId": 2, + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_bad_personality(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "personality": [{ + "path": "/path/to/file", + "contents": "INVALID b64", + }] + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + 
req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_personality(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "personality": [{ + "path": "/path/to/file", + "contents": base64.b64encode("Test String"), + }] + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_resize_server(self): + + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(resize=dict(flavorRef="http://localhost/3")) + req.body = json.dumps(body_dict) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + + def test_resize_server_no_flavor(self): + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(resize=dict()) + req.body = json.dumps(body_dict) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_resize_server_no_flavor_ref(self): + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(resize=dict(flavorRef=None)) + req.body = json.dumps(body_dict) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_confirm_resize_server(self): + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(confirmResize=None) + req.body = json.dumps(body_dict) + + self.confirm_resize_called = False + + def cr_mock(*args): + self.confirm_resize_called = True + + self.stubs.Set(nova.compute.api.API, 'confirm_resize', cr_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) + self.assertEqual(self.confirm_resize_called, True) + + def test_revert_resize_server(self): + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(revertResize=None) + req.body = json.dumps(body_dict) + + self.revert_resize_called = False + + def revert_mock(*args): + self.revert_resize_called = True + + self.stubs.Set(nova.compute.api.API, 'revert_resize', revert_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.revert_resize_called, True) + + def test_create_image(self): + body = { + 'createImage': { + 'name': 'Snapshot 1', + }, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + location = response.headers['Location'] + self.assertEqual('http://localhost/v1.1/images/123', location) + + def test_create_image_snapshots_disabled(self): + """Don't permit a snapshot if the allow_instance_snapshots flag is + False + """ + self.flags(allow_instance_snapshots=False) + body = { + 'createImage': { + 'name': 'Snapshot 1', + }, + } + req = 
webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_image_with_metadata(self): + body = { + 'createImage': { + 'name': 'Snapshot 1', + 'metadata': {'key': 'asdf'}, + }, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + location = response.headers['Location'] + self.assertEqual('http://localhost/v1.1/images/123', location) + + def test_create_image_no_name(self): + body = { + 'createImage': {}, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_image_bad_metadata(self): + body = { + 'createImage': { + 'name': 'geoff', + 'metadata': 'henry', + }, + } + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup(self): + """The happy path for creating backups""" + self.flags(allow_admin_api=True) + + body = { + 'createBackup': { + 'name': 'Backup 1', + 'backup_type': 'daily', + 'rotation': 1, + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(202, response.status_int) + self.assertTrue(response.headers['Location']) + + +class TestServerActionXMLDeserializerV11(test.TestCase): + + def setUp(self): + self.deserializer = create_instance_helper.ServerXMLDeserializerV11() + + def tearDown(self): + pass + + def test_create_image(self): + serial_request = """ +<createImage xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test"/>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "createImage": { + "name": "new-server-test", + }, + } + self.assertEquals(request['body'], expected) + + def test_create_image_with_metadata(self): + serial_request = """ +<createImage xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test"> + <metadata> + <meta key="key1">value1</meta> + </metadata> +</createImage>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "createImage": { + "name": "new-server-test", + "metadata": {"key1": "value1"}, + }, + } + self.assertEquals(request['body'], expected) + + def test_change_pass(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <changePassword + xmlns="http://docs.openstack.org/compute/api/v1.1" + adminPass="1234pass"/> """ + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "changePassword": { + "adminPass": "1234pass", + }, + } + self.assertEquals(request['body'], expected) + + def test_change_pass_no_pass(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <changePassword + xmlns="http://docs.openstack.org/compute/api/v1.1"/> """ + self.assertRaises(AttributeError, + 
self.deserializer.deserialize, + serial_request, + 'action') + + def test_reboot(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <reboot + xmlns="http://docs.openstack.org/compute/api/v1.1" + type="HARD"/>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "reboot": { + "type": "HARD", + }, + } + self.assertEquals(request['body'], expected) + + def test_reboot_no_type(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <reboot + xmlns="http://docs.openstack.org/compute/api/v1.1"/>""" + self.assertRaises(AttributeError, + self.deserializer.deserialize, + serial_request, + 'action') + + def test_resize(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <resize + xmlns="http://docs.openstack.org/compute/api/v1.1" + flavorRef="http://localhost/flavors/3"/>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "resize": { + "flavorRef": "http://localhost/flavors/3" + }, + } + self.assertEquals(request['body'], expected) + + def test_resize_no_flavor_ref(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <resize + xmlns="http://docs.openstack.org/compute/api/v1.1"/>""" + self.assertRaises(AttributeError, + self.deserializer.deserialize, + serial_request, + 'action') + + def test_confirm_resize(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <confirmResize + xmlns="http://docs.openstack.org/compute/api/v1.1"/>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "confirmResize": None, + } + self.assertEquals(request['body'], expected) + + def test_revert_resize(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <revertResize + xmlns="http://docs.openstack.org/compute/api/v1.1"/>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "revertResize": None, + } + self.assertEquals(request['body'], expected) + + def test_rebuild(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <rebuild + xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test" + imageRef="http://localhost/images/1"> + <metadata> + <meta key="My Server Name">Apache1</meta> + </metadata> + <personality> + <file path="/etc/banner.txt">Mg==</file> + </personality> + </rebuild>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "rebuild": { + "name": "new-server-test", + "imageRef": "http://localhost/images/1", + "metadata": { + "My Server Name": "Apache1", + }, + "personality": [ + {"path": "/etc/banner.txt", "contents": "Mg=="}, + ], + }, + } + self.assertDictMatch(request['body'], expected) + + def test_rebuild_minimum(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <rebuild + xmlns="http://docs.openstack.org/compute/api/v1.1" + imageRef="http://localhost/images/1"/>""" + request = self.deserializer.deserialize(serial_request, 'action') + expected = { + "rebuild": { + "imageRef": "http://localhost/images/1", + }, + } + self.assertDictMatch(request['body'], expected) + + def test_rebuild_no_imageRef(self): + serial_request = """<?xml version="1.0" encoding="UTF-8"?> + <rebuild + xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test"> + <metadata> + <meta key="My Server Name">Apache1</meta> + </metadata> + <personality> + <file path="/etc/banner.txt">Mg==</file> + </personality> + </rebuild>""" + self.assertRaises(AttributeError, + self.deserializer.deserialize, + 
serial_request, + 'action') diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py index 0431e68d2..08a6a062a 100644 --- a/nova/tests/api/openstack/test_server_metadata.py +++ b/nova/tests/api/openstack/test_server_metadata.py @@ -16,14 +16,12 @@ # under the License. import json -import stubout -import unittest import webob - +from xml.dom import minidom from nova import exception from nova import flags -from nova.api import openstack +from nova import test from nova.tests.api.openstack import fakes import nova.wsgi @@ -53,11 +51,10 @@ def delete_server_metadata(context, server_id, key): def stub_server_metadata(): metadata = { - "key1": "value1", - "key2": "value2", - "key3": "value3", - "key4": "value4", - "key5": "value5"} + "key1": "value1", + "key2": "value2", + "key3": "value3", + } return metadata @@ -76,89 +73,130 @@ def return_server_nonexistant(context, server_id): raise exception.InstanceNotFound() -class ServerMetaDataTest(unittest.TestCase): +class ServerMetaDataTest(test.TestCase): def setUp(self): super(ServerMetaDataTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) self.stubs.Set(nova.db.api, 'instance_get', return_server) - def tearDown(self): - self.stubs.UnsetAll() - super(ServerMetaDataTest, self).tearDown() - def test_index(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', return_server_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata') res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) self.assertEqual('application/json', res.headers['Content-Type']) - self.assertEqual('value1', res_dict['metadata']['key1']) + expected = { + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + }, + } + self.assertEqual(expected, res_dict) + + def test_index_xml(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_server_metadata) + request = webob.Request.blank("/v1.1/servers/1/metadata") + request.accept = "application/xml" + response = request.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + self.assertEqual("application/xml", response.content_type) + + actual_metadata = minidom.parseString(response.body.replace(" ", "")) + + expected_metadata = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key3">value3</meta> + <meta key="key2">value2</meta> + <meta key="key1">value1</meta> + </metadata> + """.replace(" ", "").replace("\n", "")) + + self.assertEqual(expected_metadata.toxml(), actual_metadata.toxml()) def test_index_nonexistant_server(self): self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) - req = webob.Request.blank('/v1.1/servers/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata') res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) def test_index_no_data(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', return_empty_server_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata') res = req.get_response(fakes.wsgi_app()) - res_dict = 
json.loads(res.body) self.assertEqual(200, res.status_int) - self.assertEqual('application/json', res.headers['Content-Type']) - self.assertEqual(0, len(res_dict['metadata'])) + res_dict = json.loads(res.body) + expected = {'metadata': {}} + self.assertEqual(expected, res_dict) def test_show(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', return_server_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta/key5') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key2') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) - self.assertEqual('application/json', res.headers['Content-Type']) - self.assertEqual('value5', res_dict['key5']) + expected = {'meta': {'key2': 'value2'}} + self.assertEqual(expected, res_dict) + + def test_show_xml(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_server_metadata) + request = webob.Request.blank("/v1.1/servers/1/metadata/key2") + request.accept = "application/xml" + response = request.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + self.assertEqual("application/xml", response.content_type) + + actual_metadata = minidom.parseString(response.body.replace(" ", "")) + + expected_metadata = minidom.parseString(""" + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" + key="key2">value2</meta> + """.replace(" ", "").replace("\n", "")) + + self.assertEqual(expected_metadata.toxml(), actual_metadata.toxml()) def test_show_nonexistant_server(self): self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) - req = webob.Request.blank('/v1.1/servers/1/meta/key5') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key2') res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) def test_show_meta_not_found(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', return_empty_server_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta/key6') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key6') res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) def test_delete(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_server_metadata) self.stubs.Set(nova.db.api, 'instance_metadata_delete', delete_server_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta/key5') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key2') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, res.status_int) + self.assertEqual(204, res.status_int) + self.assertEqual('', res.body) def test_delete_nonexistant_server(self): self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) - req = webob.Request.blank('/v1.1/servers/1/meta/key5') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key1') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + + def test_delete_meta_not_found(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_empty_server_metadata) + req = webob.Request.blank('/v1.1/servers/1/metadata/key6') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) @@ -166,22 +204,45 @@ class ServerMetaDataTest(unittest.TestCase): def test_create(self): self.stubs.Set(nova.db.api, 
'instance_metadata_update_or_create', return_create_instance_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'POST' - req.body = '{"metadata": {"key1": "value1"}}' - req.headers["content-type"] = "application/json" + req.content_type = "application/json" + expected = {"metadata": {"key1": "value1"}} + req.body = json.dumps(expected) res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) res_dict = json.loads(res.body) - self.assertEqual('application/json', res.headers['Content-Type']) - self.assertEqual('value1', res_dict['metadata']['key1']) + self.assertEqual(expected, res_dict) + + def test_create_xml(self): + self.stubs.Set(nova.db.api, "instance_metadata_update_or_create", + return_create_instance_metadata) + req = webob.Request.blank("/v1.1/servers/1/metadata") + req.method = "POST" + req.content_type = "application/xml" + req.accept = "application/xml" + + request_metadata = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key3">value3</meta> + <meta key="key2">value2</meta> + <meta key="key1">value1</meta> + </metadata> + """.replace(" ", "").replace("\n", "")) + + req.body = str(request_metadata.toxml()) + response = req.get_response(fakes.wsgi_app()) + + self.assertEqual(200, response.status_int) + actual_metadata = minidom.parseString(response.body) + + self.assertEqual(request_metadata.toxml(), actual_metadata.toxml()) def test_create_empty_body(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'POST' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) @@ -189,34 +250,112 @@ class ServerMetaDataTest(unittest.TestCase): def test_create_nonexistant_server(self): self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) - req = webob.Request.blank('/v1.1/servers/100/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/100/metadata') req.method = 'POST' req.body = '{"metadata": {"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) + def test_update_all(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/metadata') + req.method = 'PUT' + req.content_type = "application/json" + expected = { + 'metadata': { + 'key10': 'value10', + 'key99': 'value99', + }, + } + req.body = json.dumps(expected) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual(expected, res_dict) + + def test_update_all_empty_container(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/metadata') + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': {}} + req.body = json.dumps(expected) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual(expected, res_dict) + + def test_update_all_malformed_container(self): + self.stubs.Set(nova.db.api, 
'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/metadata') + req.method = 'PUT' + req.content_type = "application/json" + expected = {'meta': {}} + req.body = json.dumps(expected) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_update_all_malformed_data(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/metadata') + req.method = 'PUT' + req.content_type = "application/json" + expected = {'metadata': ['asdf']} + req.body = json.dumps(expected) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_update_all_nonexistant_server(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) + req = webob.Request.blank('/v1.1/servers/100/metadata') + req.method = 'PUT' + req.content_type = "application/json" + req.body = json.dumps({'metadata': {'key10': 'value10'}}) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + def test_update_item(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' - req.body = '{"key1": "value1"}' + req.body = '{"meta": {"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) self.assertEqual('application/json', res.headers['Content-Type']) res_dict = json.loads(res.body) - self.assertEqual('value1', res_dict['key1']) + expected = {'meta': {'key1': 'value1'}} + self.assertEqual(expected, res_dict) + + def test_update_item_xml(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/metadata/key9') + req.method = 'PUT' + req.accept = "application/json" + req.content_type = "application/xml" + req.body = """ + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" + key="key9">value9</meta> + """ + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + res_dict = json.loads(res.body) + expected = {'meta': {'key9': 'value9'}} + self.assertEqual(expected, res_dict) def test_update_item_nonexistant_server(self): self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) - req = webob.Request.blank('/v1.1/servers/asdf/100/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/asdf/metadata/key1') req.method = 'PUT' - req.body = '{"key1": "value1"}' + req.body = '{"meta":{"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) @@ -224,8 +363,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_update_item_empty_body(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) @@ -234,10 +372,9 @@ class 
ServerMetaDataTest(unittest.TestCase): def test_update_item_too_many_keys(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' - req.body = '{"key1": "value1", "key2": "value2"}' + req.body = '{"meta": {"key1": "value1", "key2": "value2"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) @@ -245,10 +382,9 @@ class ServerMetaDataTest(unittest.TestCase): def test_update_item_body_uri_mismatch(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) - req = webob.Request.blank('/v1.1/servers/1/meta/bad') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/bad') req.method = 'PUT' - req.body = '{"key1": "value1"}' + req.body = '{"meta": {"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) @@ -260,8 +396,7 @@ class ServerMetaDataTest(unittest.TestCase): for num in range(FLAGS.quota_metadata_items + 1): data['metadata']['key%i' % num] = "blah" json_string = str(data).replace("\'", "\"") - req = webob.Request.blank('/v1.1/servers/1/meta') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata') req.method = 'POST' req.body = json_string req.headers["content-type"] = "application/json" @@ -271,10 +406,9 @@ class ServerMetaDataTest(unittest.TestCase): def test_to_many_metadata_items_on_update_item(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata_max) - req = webob.Request.blank('/v1.1/servers/1/meta/key1') - req.environ['api.version'] = '1.1' + req = webob.Request.blank('/v1.1/servers/1/metadata/key1') req.method = 'PUT' - req.body = '{"a new key": "a new value"}' + req.body = '{"meta": {"a new key": "a new value"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 4027ef829..fd06b2e64 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -21,13 +21,11 @@ import json import unittest from xml.dom import minidom -import stubout import webob from nova import context from nova import db from nova import exception -from nova import flags from nova import test from nova import utils import nova.api.openstack @@ -47,10 +45,6 @@ from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes -FLAGS = flags.FLAGS -FLAGS.verbose = True - - FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' @@ -104,8 +98,8 @@ def return_server_with_uuid_and_power_state(power_state): return _return_server -def return_servers(context, user_id=1): - return [stub_instance(i, user_id) for i in xrange(5)] +def return_servers(context, *args, **kwargs): + return [stub_instance(i, 'fake', 'fake') for i in xrange(5)] def return_servers_by_reservation(context, reservation_id=""): @@ -148,10 +142,10 @@ def instance_addresses(context, instance_id): return None -def stub_instance(id, user_id=1, private_address=None, public_addresses=None, - host=None, power_state=0, reservation_id="", - uuid=FAKE_UUID, image_ref="10", 
flavor_id="1", - interfaces=None): +def stub_instance(id, user_id='fake', project_id='fake', private_address=None, + public_addresses=None, host=None, power_state=0, + reservation_id="", uuid=FAKE_UUID, image_ref="10", + flavor_id="1", interfaces=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -177,7 +171,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "admin_pass": "", "user_id": user_id, - "project_id": "", + "project_id": project_id, "image_ref": image_ref, "kernel_id": "", "ramdisk_id": "", @@ -236,12 +230,9 @@ class ServersTest(test.TestCase): def setUp(self): self.maxDiff = None super(ServersTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} + self.flags(verbose=True) fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_image_service(self.stubs) self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid) @@ -249,7 +240,7 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server_by_uuid) - self.stubs.Set(nova.db.api, 'instance_get_all_by_user', + self.stubs.Set(nova.db.api, 'instance_get_all_by_project', return_servers) self.stubs.Set(nova.db.api, 'instance_add_security_group', return_security_group) @@ -264,15 +255,9 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.compute.API, 'resume', fake_compute_api) self.stubs.Set(nova.compute.API, "get_diagnostics", fake_compute_api) self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api) - self.allow_admin = FLAGS.allow_admin_api self.webreq = common.webob_factory('/v1.0/servers') - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = self.allow_admin - super(ServersTest, self).tearDown() - def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') res = req.get_response(fakes.wsgi_app()) @@ -767,7 +752,7 @@ class ServersTest(test.TestCase): self.assertEquals(ip.getAttribute('addr'), private) def test_get_server_by_id_with_addresses_v1_1(self): - FLAGS.use_ipv6 = True + self.flags(use_ipv6=True) interfaces = [ { 'network': {'label': 'network_1'}, @@ -811,7 +796,7 @@ class ServersTest(test.TestCase): self.assertEqual(addresses, expected) def test_get_server_by_id_with_addresses_v1_1_ipv6_disabled(self): - FLAGS.use_ipv6 = False + self.flags(use_ipv6=False) interfaces = [ { 'network': {'label': 'network_1'}, @@ -854,7 +839,7 @@ class ServersTest(test.TestCase): self.assertEqual(addresses, expected) def test_get_server_addresses_v1_1(self): - FLAGS.use_ipv6 = True + self.flags(use_ipv6=True) interfaces = [ { 'network': {'label': 'network_1'}, @@ -905,7 +890,7 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict, expected) def test_get_server_addresses_single_network_v1_1(self): - FLAGS.use_ipv6 = True + self.flags(use_ipv6=True) interfaces = [ { 'network': {'label': 'network_1'}, @@ -972,6 +957,7 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) + self.assertEqual(len(res_dict['servers']), 5) i = 0 for s in res_dict['servers']: self.assertEqual(s['id'], i) @@ -1035,6 +1021,7 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) + 
self.assertEqual(len(res_dict['servers']), 5) for i, s in enumerate(res_dict['servers']): self.assertEqual(s['id'], i) self.assertEqual(s['name'], 'server%d' % i) @@ -1197,7 +1184,7 @@ class ServersTest(test.TestCase): def test_create_instance_via_zones(self): """Server generated ReservationID""" self._setup_for_create_instance() - FLAGS.allow_admin_api = True + self.flags(allow_admin_api=True) body = dict(server=dict( name='server_test', imageId=3, flavorId=2, @@ -1219,7 +1206,7 @@ class ServersTest(test.TestCase): def test_create_instance_via_zones_with_resid(self): """User supplied ReservationID""" self._setup_for_create_instance() - FLAGS.allow_admin_api = True + self.flags(allow_admin_api=True) body = dict(server=dict( name='server_test', imageId=3, flavorId=2, @@ -1323,7 +1310,8 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1(self): self._setup_for_create_instance() - image_href = 'http://localhost/images/2' + # proper local hrefs must start with 'http://localhost/v1.1/' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/flavors/3' expected_flavor = { "id": "3", @@ -1728,10 +1716,11 @@ class ServersTest(test.TestCase): instances - 2 on one host and 3 on another. ''' - def return_servers_with_host(context, user_id=1): - return [stub_instance(i, 1, None, None, i % 2) for i in xrange(5)] + def return_servers_with_host(context, *args, **kwargs): + return [stub_instance(i, 'fake', 'fake', None, None, i % 2) + for i in xrange(5)] - self.stubs.Set(nova.db.api, 'instance_get_all_by_user', + self.stubs.Set(nova.db.api, 'instance_get_all_by_project', return_servers_with_host) req = webob.Request.blank('/v1.0/servers/detail') @@ -1751,340 +1740,68 @@ class ServersTest(test.TestCase): self.assertEqual(s['flavorId'], 1) def test_server_pause(self): - FLAGS.allow_admin_api = True - body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) + self.flags(allow_admin_api=True) req = webob.Request.blank('/v1.0/servers/1/pause') req.method = 'POST' req.content_type = 'application/json' - req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_unpause(self): - FLAGS.allow_admin_api = True - body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) + self.flags(allow_admin_api=True) req = webob.Request.blank('/v1.0/servers/1/unpause') req.method = 'POST' req.content_type = 'application/json' - req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_suspend(self): - FLAGS.allow_admin_api = True - body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) + self.flags(allow_admin_api=True) req = webob.Request.blank('/v1.0/servers/1/suspend') req.method = 'POST' req.content_type = 'application/json' - req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_resume(self): - FLAGS.allow_admin_api = True - body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) + self.flags(allow_admin_api=True) req = webob.Request.blank('/v1.0/servers/1/resume') req.method = 'POST' req.content_type = 'application/json' - req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_reset_network(self): - FLAGS.allow_admin_api = True - body = 
dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) + self.flags(allow_admin_api=True) req = webob.Request.blank('/v1.0/servers/1/reset_network') req.method = 'POST' req.content_type = 'application/json' - req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_inject_network_info(self): - FLAGS.allow_admin_api = True - body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) + self.flags(allow_admin_api=True) req = webob.Request.blank( '/v1.0/servers/1/inject_network_info') req.method = 'POST' req.content_type = 'application/json' - req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_diagnostics(self): + self.flags(allow_admin_api=False) req = webob.Request.blank("/v1.0/servers/1/diagnostics") req.method = "GET" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) def test_server_actions(self): + self.flags(allow_admin_api=False) req = webob.Request.blank("/v1.0/servers/1/actions") req.method = "GET" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) - def test_server_change_password(self): - body = {'changePassword': {'adminPass': '1234pass'}} - req = webob.Request.blank('/v1.0/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 501) - - def test_server_change_password_xml(self): - req = webob.Request.blank('/v1.0/servers/1/action') - req.method = 'POST' - req.content_type = 'application/xml' - req.body = '<changePassword adminPass="1234pass">' -# res = req.get_response(fakes.wsgi_app()) -# self.assertEqual(res.status_int, 501) - - def test_server_change_password_v1_1(self): - mock_method = MockSetAdminPassword() - self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) - body = {'changePassword': {'adminPass': '1234pass'}} - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - self.assertEqual(mock_method.instance_id, '1') - self.assertEqual(mock_method.password, '1234pass') - - def test_server_change_password_bad_request_v1_1(self): - body = {'changePassword': {'pass': '12345'}} - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_change_password_empty_string_v1_1(self): - body = {'changePassword': {'adminPass': ''}} - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_change_password_none_v1_1(self): - body = {'changePassword': {'adminPass': None}} - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_change_password_not_a_string_v1_1(self): - body = {'changePassword': {'adminPass': 1234}} - req = 
webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_reboot(self): - body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) - req = webob.Request.blank('/v1.0/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - res = req.get_response(fakes.wsgi_app()) - - def test_server_rebuild_accepted(self): - body = { - "rebuild": { - "imageId": 2, - }, - } - - req = webob.Request.blank('/v1.0/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - self.assertEqual(res.body, "") - - def test_server_rebuild_rejected_when_building(self): - body = { - "rebuild": { - "imageId": 2, - }, - } - - state = power_state.BUILDING - new_return_server = return_server_with_power_state(state) - self.stubs.Set(nova.db.api, 'instance_get', new_return_server) - self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) - - req = webob.Request.blank('/v1.0/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 409) - - def test_server_rebuild_bad_entity(self): - body = { - "rebuild": { - }, - } - - req = webob.Request.blank('/v1.0/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_rebuild_accepted_minimum_v1_1(self): - body = { - "rebuild": { - "imageRef": "http://localhost/images/2", - }, - } - - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - - def test_server_rebuild_rejected_when_building_v1_1(self): - body = { - "rebuild": { - "imageRef": "http://localhost/images/2", - }, - } - - state = power_state.BUILDING - new_return_server = return_server_with_power_state(state) - self.stubs.Set(nova.db.api, 'instance_get', new_return_server) - self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) - - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 409) - - def test_server_rebuild_accepted_with_metadata_v1_1(self): - body = { - "rebuild": { - "imageRef": "http://localhost/images/2", - "metadata": { - "new": "metadata", - }, - }, - } - - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - - def test_server_rebuild_accepted_with_bad_metadata_v1_1(self): - body = { - "rebuild": { - "imageRef": "http://localhost/images/2", - "metadata": "stack", - }, - } - - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) 
- - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_rebuild_bad_entity_v1_1(self): - body = { - "rebuild": { - "imageId": 2, - }, - } - - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_rebuild_bad_personality_v1_1(self): - body = { - "rebuild": { - "imageRef": "http://localhost/images/2", - "personality": [{ - "path": "/path/to/file", - "contents": "INVALID b64", - }] - }, - } - - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_server_rebuild_personality_v1_1(self): - body = { - "rebuild": { - "imageRef": "http://localhost/images/2", - "personality": [{ - "path": "/path/to/file", - "contents": base64.b64encode("Test String"), - }] - }, - } - - req = webob.Request.blank('/v1.1/servers/1/action') - req.method = 'POST' - req.content_type = 'application/json' - req.body = json.dumps(body) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - def test_delete_server_instance(self): req = webob.Request.blank('/v1.0/servers/1') req.method = 'DELETE' @@ -2102,7 +1819,7 @@ class ServersTest(test.TestCase): self.assertEqual(self.server_delete_called, True) def test_rescue_accepted(self): - FLAGS.allow_admin_api = True + self.flags(allow_admin_api=True) body = {} self.called = False @@ -2121,7 +1838,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) def test_rescue_raises_handled(self): - FLAGS.allow_admin_api = True + self.flags(allow_admin_api=True) body = {} def rescue_mock(*args, **kwargs): @@ -2152,147 +1869,6 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 204) self.assertEqual(self.server_delete_called, True) - def test_resize_server(self): - req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) - - self.resize_called = False - - def resize_mock(*args): - self.resize_called = True - - self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - self.assertEqual(self.resize_called, True) - - def test_resize_server_v11(self): - - req = webob.Request.blank('/v1.1/servers/1/action') - req.content_type = 'application/json' - req.method = 'POST' - body_dict = dict(resize=dict(flavorRef="http://localhost/3")) - req.body = json.dumps(body_dict) - - self.resize_called = False - - def resize_mock(*args): - self.resize_called = True - - self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - self.assertEqual(self.resize_called, True) - - def test_resize_bad_flavor_fails(self): - req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) - - self.resize_called = False - - def resize_mock(*args): - self.resize_called = True - - self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 422) - self.assertEqual(self.resize_called, False) - - def test_resize_raises_fails(self): - req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) - - def resize_mock(*args): - raise Exception('hurr durr') - - 
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 500) - - def test_resized_server_has_correct_status(self): - req = self.webreq('/1', 'GET') - - def fake_migration_get(*args): - return {} - - self.stubs.Set(nova.db, 'migration_get_by_instance_and_status', - fake_migration_get) - res = req.get_response(fakes.wsgi_app()) - body = json.loads(res.body) - self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM') - - def test_confirm_resize_server(self): - req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) - - self.resize_called = False - - def confirm_resize_mock(*args): - self.resize_called = True - - self.stubs.Set(nova.compute.api.API, 'confirm_resize', - confirm_resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 204) - self.assertEqual(self.resize_called, True) - - def test_confirm_resize_server_fails(self): - req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) - - def confirm_resize_mock(*args): - raise Exception('hurr durr') - - self.stubs.Set(nova.compute.api.API, 'confirm_resize', - confirm_resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_revert_resize_server(self): - req = self.webreq('/1/action', 'POST', dict(revertResize=None)) - - self.resize_called = False - - def revert_resize_mock(*args): - self.resize_called = True - - self.stubs.Set(nova.compute.api.API, 'revert_resize', - revert_resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - self.assertEqual(self.resize_called, True) - - def test_revert_resize_server_fails(self): - req = self.webreq('/1/action', 'POST', dict(revertResize=None)) - - def revert_resize_mock(*args): - raise Exception('hurr durr') - - self.stubs.Set(nova.compute.api.API, 'revert_resize', - revert_resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 400) - - def test_migrate_server(self): - """This is basically the same as resize, only we provide the `migrate` - attribute in the body's dict. 
- """ - req = self.webreq('/1/action', 'POST', dict(migrate=None)) - - self.resize_called = False - - def resize_mock(*args): - self.resize_called = True - - self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) - - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 202) - self.assertEqual(self.resize_called, True) - def test_shutdown_status(self): new_server = return_server_with_power_state(power_state.SHUTDOWN) self.stubs.Set(nova.db.api, 'instance_get', new_server) @@ -2597,10 +2173,11 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", self.assertEqual(request['body'], expected) -class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): +class TestServerCreateRequestXMLDeserializerV11(test.TestCase): def setUp(self): - self.deserializer = create_instance_helper.ServerXMLDeserializer() + super(TestServerCreateRequestXMLDeserializerV11, self).setUp() + self.deserializer = create_instance_helper.ServerXMLDeserializerV11() def test_minimal_request(self): serial_request = """ @@ -2614,8 +2191,6 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): "name": "new-server-test", "imageRef": "1", "flavorRef": "2", - "metadata": {}, - "personality": [], }, } self.assertEquals(request['body'], expected) @@ -2634,8 +2209,6 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): "imageRef": "1", "flavorRef": "2", "adminPass": "1234", - "metadata": {}, - "personality": [], }, } self.assertEquals(request['body'], expected) @@ -2652,8 +2225,6 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): "name": "new-server-test", "imageRef": "http://localhost:8774/v1.1/images/2", "flavorRef": "3", - "metadata": {}, - "personality": [], }, } self.assertEquals(request['body'], expected) @@ -2670,8 +2241,6 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): "name": "new-server-test", "imageRef": "1", "flavorRef": "http://localhost:8774/v1.1/flavors/3", - "metadata": {}, - "personality": [], }, } self.assertEquals(request['body'], expected) @@ -2715,7 +2284,6 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): "imageRef": "1", "flavorRef": "2", "metadata": {"one": "two", "open": "snack"}, - "personality": [], }, } self.assertEquals(request['body'], expected) @@ -2737,14 +2305,13 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): "name": "new-server-test", "imageRef": "1", "flavorRef": "2", - "metadata": {}, "personality": [ {"path": "/etc/banner.txt", "contents": "MQ=="}, {"path": "/etc/hosts", "contents": "Mg=="}, ], }, } - self.assertEquals(request['body'], expected) + self.assertDictMatch(request['body'], expected) def test_spec_request(self): image_bookmark_link = "http://servers.api.openstack.org/1234/" + \ @@ -2780,7 +2347,7 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase): self.assertEquals(request['body'], expected) -class TextAddressesXMLSerialization(test.TestCase): +class TestAddressesXMLSerialization(test.TestCase): serializer = nova.api.openstack.ips.IPXMLSerializer() @@ -2840,18 +2407,8 @@ class TestServerInstanceCreation(test.TestCase): def setUp(self): super(TestServerInstanceCreation, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) fakes.stub_out_image_service(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) - self.allow_admin = FLAGS.allow_admin_api - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = 
self.allow_admin - super(TestServerInstanceCreation, self).tearDown() def _setup_mock_compute_api_for_personality(self): diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py index c2bd7e45a..36fa1de0f 100644 --- a/nova/tests/api/openstack/test_shared_ip_groups.py +++ b/nova/tests/api/openstack/test_shared_ip_groups.py @@ -15,26 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -import stubout import webob from nova import test -from nova.api.openstack import shared_ip_groups from nova.tests.api.openstack import fakes class SharedIpGroupsTest(test.TestCase): - def setUp(self): - super(SharedIpGroupsTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_auth(self.stubs) - - def tearDown(self): - self.stubs.UnsetAll() - super(SharedIpGroupsTest, self).tearDown() - def test_get_shared_ip_groups(self): req = webob.Request.blank('/v1.0/shared_ip_groups') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py index effb2f592..1d133f9ab 100644 --- a/nova/tests/api/openstack/test_users.py +++ b/nova/tests/api/openstack/test_users.py @@ -15,10 +15,8 @@ import json -import stubout import webob -from nova import flags from nova import test from nova import utils from nova.api.openstack import users @@ -26,10 +24,6 @@ from nova.auth.manager import User, Project from nova.tests.api.openstack import fakes -FLAGS = flags.FLAGS -FLAGS.verbose = True - - def fake_init(self): self.manager = fakes.FakeAuthManager() @@ -41,7 +35,7 @@ def fake_admin_check(self, req): class UsersTest(test.TestCase): def setUp(self): super(UsersTest, self).setUp() - self.stubs = stubout.StubOutForTesting() + self.flags(verbose=True, allow_admin_api=True) self.stubs.Set(users.Controller, '__init__', fake_init) self.stubs.Set(users.Controller, '_check_admin', @@ -57,17 +51,10 @@ class UsersTest(test.TestCase): fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) - self.allow_admin = FLAGS.allow_admin_api - FLAGS.allow_admin_api = True fakemgr = fakes.FakeAuthManager() fakemgr.add_user(User('id1', 'guy1', 'acc1', 'secret1', False)) fakemgr.add_user(User('id2', 'guy2', 'acc2', 'secret2', True)) - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = self.allow_admin - super(UsersTest, self).tearDown() - def test_get_user_list(self): req = webob.Request.blank('/v1.0/users') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index da964ee1f..1269f13c9 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -16,21 +16,92 @@ # under the License. 
import json +import stubout import webob +import xml.etree.ElementTree + from nova import context from nova import test from nova.tests.api.openstack import fakes from nova.api.openstack import versions from nova.api.openstack import views +from nova.api.openstack import wsgi + +VERSIONS = { + "v1.0": { + "id": "v1.0", + "status": "DEPRECATED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json", + }, + ], + }, + "v1.1": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json", + }, + ], + }, +} class VersionsTest(test.TestCase): def setUp(self): super(VersionsTest, self).setUp() self.context = context.get_admin_context() + self.stubs = stubout.StubOutForTesting() + fakes.stub_out_auth(self.stubs) + #Stub out VERSIONS + self.old_versions = versions.VERSIONS + versions.VERSIONS = VERSIONS def tearDown(self): + versions.VERSIONS = self.old_versions super(VersionsTest, self).tearDown() def test_get_version_list(self): @@ -44,7 +115,7 @@ class VersionsTest(test.TestCase): { "id": "v1.1", "status": "CURRENT", - "updated": "2011-07-18T11:30:00Z", + "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", @@ -54,7 +125,7 @@ class VersionsTest(test.TestCase): { "id": "v1.0", "status": "DEPRECATED", - "updated": "2010-10-09T11:30:00Z", + "updated": "2011-01-21T11:33:21Z", "links": [ { "rel": "self", @@ -64,6 +135,183 @@ class VersionsTest(test.TestCase): ] self.assertEqual(versions, expected) + def test_get_version_1_0_detail(self): + req = webob.Request.blank('/v1.0/') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/json") + version = json.loads(res.body) + expected = { + "version": { + "id": "v1.0", + "status": "DEPRECATED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.0/", + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/" + "vnd.openstack.compute-v1.0+xml", + }, + { + "base": "application/json", + "type": "application/" + "vnd.openstack.compute-v1.0+json", + }, + ], + }, + } + self.assertEqual(expected, version) + + def 
test_get_version_1_1_detail(self): + req = webob.Request.blank('/v1.1/') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/json") + version = json.loads(res.body) + expected = { + "version": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/", + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/" + "vnd.openstack.compute-v1.1+xml", + }, + { + "base": "application/json", + "type": "application/" + "vnd.openstack.compute-v1.1+json", + }, + ], + }, + } + self.assertEqual(expected, version) + + def test_get_version_1_0_detail_xml(self): + req = webob.Request.blank('/v1.0/') + req.accept = "application/xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/xml") + root = xml.etree.ElementTree.XML(res.body) + self.assertEqual(root.tag.split('}')[1], "version") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) + + children = list(root) + media_types = children[0] + media_type_nodes = list(media_types) + links = (children[1], children[2], children[3]) + + self.assertEqual(media_types.tag.split('}')[1], 'media-types') + for media_node in media_type_nodes: + self.assertEqual(media_node.tag.split('}')[1], 'media-type') + + expected = """ + <version id="v1.0" status="DEPRECATED" + updated="2011-01-21T11:33:21Z" + xmlns="%s" + xmlns:atom="http://www.w3.org/2005/Atom"> + + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.0+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.0+json"/> + </media-types> + + <atom:link href="http://localhost/v1.0/" + rel="self"/> + + <atom:link href="http://docs.rackspacecloud.com/servers/ + api/v1.0/cs-devguide-20110125.pdf" + rel="describedby" + type="application/pdf"/> + + <atom:link href="http://docs.rackspacecloud.com/servers/ + api/v1.0/application.wadl" + rel="describedby" + type="application/vnd.sun.wadl+xml"/> + </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 + + actual = res.body.replace(" ", "").replace("\n", "") + self.assertEqual(expected, actual) + + def test_get_version_1_1_detail_xml(self): + req = webob.Request.blank('/v1.1/') + req.accept = "application/xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual(res.content_type, "application/xml") + expected = """ + <version id="v1.1" status="CURRENT" + updated="2011-01-21T11:33:21Z" + xmlns="%s" + xmlns:atom="http://www.w3.org/2005/Atom"> + + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.1+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.1+json"/> + </media-types> + + <atom:link href="http://localhost/v1.1/" + rel="self"/> + + <atom:link href="http://docs.rackspacecloud.com/servers/ + api/v1.1/cs-devguide-20110125.pdf" + rel="describedby" + type="application/pdf"/> + + <atom:link 
href="http://docs.rackspacecloud.com/servers/ + api/v1.1/application.wadl" + rel="describedby" + type="application/vnd.sun.wadl+xml"/> + </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11 + + actual = res.body.replace(" ", "").replace("\n", "") + self.assertEqual(expected, actual) + def test_get_version_list_xml(self): req = webob.Request.blank('/') req.accept = "application/xml" @@ -71,18 +319,94 @@ class VersionsTest(test.TestCase): self.assertEqual(res.status_int, 200) self.assertEqual(res.content_type, "application/xml") - expected = """<versions> - <version id="v1.1" status="CURRENT" updated="2011-07-18T11:30:00Z"> + expected = """ + <versions xmlns="%s" xmlns:atom="%s"> + <version id="v1.1" status="CURRENT" updated="2011-01-21T11:33:21Z"> <atom:link href="http://localhost/v1.1/" rel="self"/> </version> <version id="v1.0" status="DEPRECATED" - updated="2010-10-09T11:30:00Z"> + updated="2011-01-21T11:33:21Z"> <atom:link href="http://localhost/v1.0/" rel="self"/> </version> - </versions>""".replace(" ", "").replace("\n", "") + </versions>""".replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11, + wsgi.XMLNS_ATOM) + + actual = res.body.replace(" ", "").replace("\n", "") + + self.assertEqual(expected, actual) + + def test_get_version_1_0_detail_atom(self): + req = webob.Request.blank('/v1.0/') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual("application/atom+xml", res.content_type) + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text">About This Version</title> + <updated>2011-01-21T11:33:21Z</updated> + <id>http://localhost/v1.0/</id> + <author> + <name>Rackspace</name> + <uri>http://www.rackspace.com/</uri> + </author> + <link href="http://localhost/v1.0/" rel="self"/> + <entry> + <id>http://localhost/v1.0/</id> + <title type="text">Version v1.0</title> + <updated>2011-01-21T11:33:21Z</updated> + <link href="http://localhost/v1.0/" + rel="self"/> + <link href="http://docs.rackspacecloud.com/servers/ + api/v1.0/cs-devguide-20110125.pdf" + rel="describedby" type="application/pdf"/> + <link href="http://docs.rackspacecloud.com/servers/ + api/v1.0/application.wadl" + rel="describedby" type="application/vnd.sun.wadl+xml"/> + <content type="text"> + Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) + </content> + </entry> + </feed>""".replace(" ", "").replace("\n", "") actual = res.body.replace(" ", "").replace("\n", "") + self.assertEqual(expected, actual) + + def test_get_version_1_1_detail_atom(self): + req = webob.Request.blank('/v1.1/') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + self.assertEqual("application/atom+xml", res.content_type) + expected = """ + <feed xmlns="http://www.w3.org/2005/Atom"> + <title type="text">About This Version</title> + <updated>2011-01-21T11:33:21Z</updated> + <id>http://localhost/v1.1/</id> + <author> + <name>Rackspace</name> + <uri>http://www.rackspace.com/</uri> + </author> + <link href="http://localhost/v1.1/" rel="self"/> + <entry> + <id>http://localhost/v1.1/</id> + <title type="text">Version v1.1</title> + <updated>2011-01-21T11:33:21Z</updated> + <link href="http://localhost/v1.1/" + rel="self"/> + <link href="http://docs.rackspacecloud.com/servers/ + api/v1.1/cs-devguide-20110125.pdf" + rel="describedby" type="application/pdf"/> + <link href="http://docs.rackspacecloud.com/servers/ + api/v1.1/application.wadl" + rel="describedby" 
type="application/vnd.sun.wadl+xml"/> + <content type="text"> + Version v1.1 CURRENT (2011-01-21T11:33:21Z) + </content> + </entry> + </feed>""".replace(" ", "").replace("\n", "") + actual = res.body.replace(" ", "").replace("\n", "") self.assertEqual(expected, actual) def test_get_version_list_atom(self): @@ -95,7 +419,7 @@ class VersionsTest(test.TestCase): expected = """ <feed xmlns="http://www.w3.org/2005/Atom"> <title type="text">Available API Versions</title> - <updated>2011-07-18T11:30:00Z</updated> + <updated>2011-01-21T11:33:21Z</updated> <id>http://localhost/</id> <author> <name>Rackspace</name> @@ -105,19 +429,19 @@ class VersionsTest(test.TestCase): <entry> <id>http://localhost/v1.1/</id> <title type="text">Version v1.1</title> - <updated>2011-07-18T11:30:00Z</updated> + <updated>2011-01-21T11:33:21Z</updated> <link href="http://localhost/v1.1/" rel="self"/> <content type="text"> - Version v1.1 CURRENT (2011-07-18T11:30:00Z) + Version v1.1 CURRENT (2011-01-21T11:33:21Z) </content> </entry> <entry> <id>http://localhost/v1.0/</id> <title type="text">Version v1.0</title> - <updated>2010-10-09T11:30:00Z</updated> + <updated>2011-01-21T11:33:21Z</updated> <link href="http://localhost/v1.0/" rel="self"/> <content type="text"> - Version v1.0 DEPRECATED (2010-10-09T11:30:00Z) + Version v1.0 DEPRECATED (2011-01-21T11:33:21Z) </content> </entry> </feed> @@ -127,28 +451,184 @@ class VersionsTest(test.TestCase): self.assertEqual(expected, actual) + def test_multi_choice_image(self): + req = webob.Request.blank('/images/1') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/json") + + expected = { + "choices": [ + { + "id": "v1.1", + "status": "CURRENT", + "links": [ + { + "href": "http://localhost/v1.1/images/1", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json" + }, + ], + }, + { + "id": "v1.0", + "status": "DEPRECATED", + "links": [ + { + "href": "http://localhost/v1.0/images/1", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json" + }, + ], + }, + ], } + + self.assertDictMatch(expected, json.loads(res.body)) + + def test_multi_choice_image_xml(self): + req = webob.Request.blank('/images/1') + req.accept = "application/xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/xml") + + expected = """ + <choices xmlns="%s" xmlns:atom="%s"> + <version id="v1.1" status="CURRENT"> + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.1+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.1+json"/> + </media-types> + <atom:link href="http://localhost/v1.1/images/1" rel="self"/> + </version> + <version id="v1.0" status="DEPRECATED"> + <media-types> + <media-type base="application/xml" + type="application/vnd.openstack.compute-v1.0+xml"/> + <media-type base="application/json" + type="application/vnd.openstack.compute-v1.0+json"/> + </media-types> + <atom:link href="http://localhost/v1.0/images/1" rel="self"/> + </version> + </choices>""".replace(" ", "").replace("\n", "") % 
(wsgi.XMLNS_V11, + wsgi.XMLNS_ATOM) + + def test_multi_choice_server_atom(self): + """ + Make sure multi choice responses do not have content-type + application/atom+xml (should use default of json) + """ + req = webob.Request.blank('/servers/2') + req.accept = "application/atom+xml" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/json") + + def test_multi_choice_server(self): + req = webob.Request.blank('/servers/2') + req.accept = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 300) + self.assertEqual(res.content_type, "application/json") + + expected = { + "choices": [ + { + "id": "v1.1", + "status": "CURRENT", + "links": [ + { + "href": "http://localhost/v1.1/servers/2", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json" + }, + ], + }, + { + "id": "v1.0", + "status": "DEPRECATED", + "links": [ + { + "href": "http://localhost/v1.0/servers/2", + "rel": "self", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json" + }, + ], + }, + ], } + + self.assertDictMatch(expected, json.loads(res.body)) + + +class VersionsViewBuilderTests(test.TestCase): def test_view_builder(self): base_url = "http://example.org/" version_data = { - "id": "3.2.1", - "status": "CURRENT", - "updated": "2011-07-18T11:30:00Z"} + "v3.2.1": { + "id": "3.2.1", + "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", + } + } expected = { - "id": "3.2.1", - "status": "CURRENT", - "updated": "2011-07-18T11:30:00Z", - "links": [ + "versions": [ { - "rel": "self", - "href": "http://example.org/3.2.1/", - }, - ], + "id": "3.2.1", + "status": "CURRENT", + "updated": "2011-07-18T11:30:00Z", + "links": [ + { + "rel": "self", + "href": "http://example.org/3.2.1/", + }, + ], + } + ] } builder = views.versions.ViewBuilder(base_url) - output = builder.build(version_data) + output = builder.build_versions(version_data) self.assertEqual(output, expected) @@ -163,7 +643,9 @@ class VersionsTest(test.TestCase): self.assertEqual(actual, expected) - def test_xml_serializer(self): + +class VersionsSerializerTests(test.TestCase): + def test_versions_list_xml_serializer(self): versions_data = { 'versions': [ { @@ -180,20 +662,137 @@ class VersionsTest(test.TestCase): ] } - expected = """ - <versions> - <version id="2.7.1" status="DEPRECATED" - updated="2011-07-18T11:30:00Z"> - <atom:link href="http://test/2.7.1" rel="self"/> - </version> - </versions>""".replace(" ", "").replace("\n", "") + serializer = versions.VersionsXMLSerializer() + response = serializer.index(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "versions") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) + version = list(root)[0] + self.assertEqual(version.tag.split('}')[1], "version") + self.assertEqual(version.get('id'), + versions_data['versions'][0]['id']) + self.assertEqual(version.get('status'), + versions_data['versions'][0]['status']) + + link = list(version)[0] + + self.assertEqual(link.tag.split('}')[1], "link") + self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) + for key, val in 
versions_data['versions'][0]['links'][0].items(): + self.assertEqual(link.get(key), val) + + def test_versions_multi_xml_serializer(self): + versions_data = { + 'choices': [ + { + "id": "2.7.1", + "updated": "2011-07-18T11:30:00Z", + "status": "DEPRECATED", + "media-types": VERSIONS['v1.1']['media-types'], + "links": [ + { + "rel": "self", + "href": "http://test/2.7.1/images", + }, + ], + }, + ] + } + + serializer = versions.VersionsXMLSerializer() + response = serializer.multi(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "choices") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) + version = list(root)[0] + self.assertEqual(version.tag.split('}')[1], "version") + self.assertEqual(version.get('id'), versions_data['choices'][0]['id']) + self.assertEqual(version.get('status'), + versions_data['choices'][0]['status']) + + media_types = list(version)[0] + media_type_nodes = list(media_types) + self.assertEqual(media_types.tag.split('}')[1], "media-types") + + set_types = versions_data['choices'][0]['media-types'] + for i, type in enumerate(set_types): + node = media_type_nodes[i] + self.assertEqual(node.tag.split('}')[1], "media-type") + for key, val in set_types[i].items(): + self.assertEqual(node.get(key), val) + + link = list(version)[1] + + self.assertEqual(link.tag.split('}')[1], "link") + self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM) + for key, val in versions_data['choices'][0]['links'][0].items(): + self.assertEqual(link.get(key), val) + + def test_version_detail_xml_serializer(self): + version_data = { + "version": { + "id": "v1.0", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.0/", + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.0/application.wadl", + }, + ], + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.0+xml", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.0+json", + }, + ], + }, + } serializer = versions.VersionsXMLSerializer() - response = serializer.default(versions_data) - response = response.replace(" ", "").replace("\n", "") - self.assertEqual(expected, response) + response = serializer.show(version_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "version") + self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11) - def test_atom_serializer(self): + children = list(root) + media_types = children[0] + media_type_nodes = list(media_types) + links = (children[1], children[2], children[3]) + + self.assertEqual(media_types.tag.split('}')[1], 'media-types') + for i, media_node in enumerate(media_type_nodes): + self.assertEqual(media_node.tag.split('}')[1], 'media-type') + for key, val in version_data['version']['media-types'][i].items(): + self.assertEqual(val, media_node.get(key)) + + for i, link in enumerate(links): + self.assertEqual(link.tag.split('}')[0].strip('{'), + 'http://www.w3.org/2005/Atom') + self.assertEqual(link.tag.split('}')[1], 'link') + for key, val in version_data['version']['links'][i].items(): + self.assertEqual(val, link.get(key)) + + def test_versions_list_atom_serializer(self): 
versions_data = { 'versions': [ { @@ -210,45 +809,158 @@ class VersionsTest(test.TestCase): ] } - expected = """ - <feed xmlns="http://www.w3.org/2005/Atom"> - <title type="text"> - Available API Versions - </title> - <updated> - 2011-07-20T11:40:00Z - </updated> - <id> - http://test/ - </id> - <author> - <name> - Rackspace - </name> - <uri> - http://www.rackspace.com/ - </uri> - </author> - <link href="http://test/" rel="self"/> - <entry> - <id> - http://test/2.9.8 - </id> - <title type="text"> - Version 2.9.8 - </title> - <updated> - 2011-07-20T11:40:00Z - </updated> - <link href="http://test/2.9.8" rel="self"/> - <content type="text"> - Version 2.9.8 CURRENT (2011-07-20T11:40:00Z) - </content> - </entry> - </feed>""".replace(" ", "").replace("\n", "") + serializer = versions.VersionsAtomSerializer() + response = serializer.index(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "feed") + self.assertEqual(root.tag.split('}')[0].strip('{'), + "http://www.w3.org/2005/Atom") + + children = list(root) + title = children[0] + updated = children[1] + id = children[2] + author = children[3] + link = children[4] + entry = children[5] + + self.assertEqual(title.tag.split('}')[1], 'title') + self.assertEqual(title.text, 'Available API Versions') + self.assertEqual(updated.tag.split('}')[1], 'updated') + self.assertEqual(updated.text, '2011-07-20T11:40:00Z') + self.assertEqual(id.tag.split('}')[1], 'id') + self.assertEqual(id.text, 'http://test/') + + self.assertEqual(author.tag.split('}')[1], 'author') + author_name = list(author)[0] + author_uri = list(author)[1] + self.assertEqual(author_name.tag.split('}')[1], 'name') + self.assertEqual(author_name.text, 'Rackspace') + self.assertEqual(author_uri.tag.split('}')[1], 'uri') + self.assertEqual(author_uri.text, 'http://www.rackspace.com/') + + self.assertEqual(link.get('href'), 'http://test/') + self.assertEqual(link.get('rel'), 'self') + + self.assertEqual(entry.tag.split('}')[1], 'entry') + entry_children = list(entry) + entry_id = entry_children[0] + entry_title = entry_children[1] + entry_updated = entry_children[2] + entry_link = entry_children[3] + entry_content = entry_children[4] + self.assertEqual(entry_id.tag.split('}')[1], "id") + self.assertEqual(entry_id.text, "http://test/2.9.8") + self.assertEqual(entry_title.tag.split('}')[1], "title") + self.assertEqual(entry_title.get('type'), "text") + self.assertEqual(entry_title.text, "Version 2.9.8") + self.assertEqual(entry_updated.tag.split('}')[1], "updated") + self.assertEqual(entry_updated.text, "2011-07-20T11:40:00Z") + self.assertEqual(entry_link.tag.split('}')[1], "link") + self.assertEqual(entry_link.get('href'), "http://test/2.9.8") + self.assertEqual(entry_link.get('rel'), "self") + self.assertEqual(entry_content.tag.split('}')[1], "content") + self.assertEqual(entry_content.get('type'), "text") + self.assertEqual(entry_content.text, + "Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)") + + def test_version_detail_atom_serializer(self): + versions_data = { + "version": { + "id": "v1.1", + "status": "CURRENT", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/", + }, + { + "rel": "describedby", + "type": "application/pdf", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/cs-devguide-20110125.pdf", + }, + { + "rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://docs.rackspacecloud.com/" + "servers/api/v1.1/application.wadl", + }, + ], + 
"media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.compute-v1.1+xml", + }, + { + "base": "application/json", + "type": "application/vnd.openstack.compute-v1.1+json", + } + ], + }, + } serializer = versions.VersionsAtomSerializer() - response = serializer.default(versions_data) - print response - response = response.replace(" ", "").replace("\n", "") - self.assertEqual(expected, response) + response = serializer.show(versions_data) + + root = xml.etree.ElementTree.XML(response) + self.assertEqual(root.tag.split('}')[1], "feed") + self.assertEqual(root.tag.split('}')[0].strip('{'), + "http://www.w3.org/2005/Atom") + + children = list(root) + title = children[0] + updated = children[1] + id = children[2] + author = children[3] + link = children[4] + entry = children[5] + + self.assertEqual(root.tag.split('}')[1], 'feed') + self.assertEqual(title.tag.split('}')[1], 'title') + self.assertEqual(title.text, 'About This Version') + self.assertEqual(updated.tag.split('}')[1], 'updated') + self.assertEqual(updated.text, '2011-01-21T11:33:21Z') + self.assertEqual(id.tag.split('}')[1], 'id') + self.assertEqual(id.text, 'http://localhost/v1.1/') + + self.assertEqual(author.tag.split('}')[1], 'author') + author_name = list(author)[0] + author_uri = list(author)[1] + self.assertEqual(author_name.tag.split('}')[1], 'name') + self.assertEqual(author_name.text, 'Rackspace') + self.assertEqual(author_uri.tag.split('}')[1], 'uri') + self.assertEqual(author_uri.text, 'http://www.rackspace.com/') + + self.assertEqual(link.get('href'), + 'http://localhost/v1.1/') + self.assertEqual(link.get('rel'), 'self') + + self.assertEqual(entry.tag.split('}')[1], 'entry') + entry_children = list(entry) + entry_id = entry_children[0] + entry_title = entry_children[1] + entry_updated = entry_children[2] + entry_links = (entry_children[3], entry_children[4], entry_children[5]) + entry_content = entry_children[6] + + self.assertEqual(entry_id.tag.split('}')[1], "id") + self.assertEqual(entry_id.text, + "http://localhost/v1.1/") + self.assertEqual(entry_title.tag.split('}')[1], "title") + self.assertEqual(entry_title.get('type'), "text") + self.assertEqual(entry_title.text, "Version v1.1") + self.assertEqual(entry_updated.tag.split('}')[1], "updated") + self.assertEqual(entry_updated.text, "2011-01-21T11:33:21Z") + + for i, link in enumerate(versions_data["version"]["links"]): + self.assertEqual(entry_links[i].tag.split('}')[1], "link") + for key, val in versions_data["version"]["links"][i].items(): + self.assertEqual(entry_links[i].get(key), val) + + self.assertEqual(entry_content.tag.split('}')[1], "content") + self.assertEqual(entry_content.get('type'), "text") + self.assertEqual(entry_content.text, + "Version v1.1 CURRENT (2011-01-21T11:33:21Z)") diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 6a6e13d93..4a46a5764 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -29,7 +29,6 @@ from nova.scheduler import api FLAGS = flags.FLAGS -FLAGS.verbose = True def zone_get(context, zone_id): @@ -95,31 +94,15 @@ def zone_select(context, specs): class ZonesTest(test.TestCase): def setUp(self): super(ZonesTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} + self.flags(verbose=True, allow_admin_api=True) fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) - 
- self.allow_admin = FLAGS.allow_admin_api - FLAGS.allow_admin_api = True self.stubs.Set(nova.db, 'zone_get', zone_get) self.stubs.Set(nova.db, 'zone_update', zone_update) self.stubs.Set(nova.db, 'zone_create', zone_create) self.stubs.Set(nova.db, 'zone_delete', zone_delete) - self.old_zone_name = FLAGS.zone_name - self.old_zone_capabilities = FLAGS.zone_capabilities - - def tearDown(self): - self.stubs.UnsetAll() - FLAGS.allow_admin_api = self.allow_admin - FLAGS.zone_name = self.old_zone_name - FLAGS.zone_capabilities = self.old_zone_capabilities - super(ZonesTest, self).tearDown() - def test_get_zone_list_scheduler(self): self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler) req = webob.Request.blank('/v1.0/zones') @@ -190,8 +173,8 @@ class ZonesTest(test.TestCase): self.assertFalse('username' in res_dict['zone']) def test_zone_info(self): - FLAGS.zone_name = 'darksecret' - FLAGS.zone_capabilities = ['cap1=a;b', 'cap2=c;d'] + caps = ['cap1=a;b', 'cap2=c;d'] + self.flags(zone_name='darksecret', zone_capabilities=caps) self.stubs.Set(api, '_call_scheduler', zone_capabilities) body = dict(zone=dict(username='zeb', password='sneaky')) @@ -205,7 +188,8 @@ class ZonesTest(test.TestCase): self.assertEqual(res_dict['zone']['cap2'], 'c;d') def test_zone_select(self): - FLAGS.build_plan_encryption_key = 'c286696d887c9aa0611bbb3e2025a45a' + key = 'c286696d887c9aa0611bbb3e2025a45a' + self.flags(build_plan_encryption_key=key) self.stubs.Set(api, 'select', zone_select) req = webob.Request.blank('/v1.0/zones/select') diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index aac3ff330..d51b19ccd 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -60,7 +60,10 @@ class FakeGlance(object): 'container_format': 'ovf'}, 'image_data': StringIO.StringIO('')}} - def __init__(self, host, port=None, use_ssl=False): + def __init__(self, host, port=None, use_ssl=False, auth_tok=None): + pass + + def set_auth_token(self, auth_tok): pass def get_image_meta(self, image_id): diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py index 042819b9c..d346d0a70 100644 --- a/nova/tests/hyperv_unittest.py +++ b/nova/tests/hyperv_unittest.py @@ -21,24 +21,18 @@ import random from nova import context from nova import db -from nova import flags from nova import test -from nova.auth import manager from nova.virt import hyperv -FLAGS = flags.FLAGS -FLAGS.connection_type = 'hyperv' - class HyperVTestCase(test.TestCase): """Test cases for the Hyper-V driver""" def setUp(self): super(HyperVTestCase, self).setUp() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext(self.user, self.project) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) + self.flags(connection_type='hyperv') def test_create_destroy(self): """Create a VM and destroy it""" diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 223e7ae57..0ff508ffa 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -31,6 +31,9 @@ class StubGlanceClient(object): self.add_response = add_response self.update_response = update_response + def set_auth_token(self, auth_tok): + pass + def get_image_meta(self, image_id): return self.images[image_id] @@ -232,3 +235,39 @@ class TestMutatorDateTimeTests(BaseGlanceTest): 
'updated_at': None, 'deleted_at': None} return fixture + + +class TestGlanceSerializer(unittest.TestCase): + def test_serialize(self): + metadata = {'name': 'image1', + 'is_public': True, + 'foo': 'bar', + 'properties': { + 'prop1': 'propvalue1', + 'mappings': [ + {'virtual': 'aaa', + 'device': 'bbb'}, + {'virtual': 'xxx', + 'device': 'yyy'}], + 'block_device_mapping': [ + {'virtual_device': 'fake', + 'device_name': '/dev/fake'}, + {'virtual_device': 'ephemeral0', + 'device_name': '/dev/fake0'}]}} + + converted_expected = { + 'name': 'image1', + 'is_public': True, + 'foo': 'bar', + 'properties': { + 'prop1': 'propvalue1', + 'mappings': + '[{"device": "bbb", "virtual": "aaa"}, ' + '{"device": "yyy", "virtual": "xxx"}]', + 'block_device_mapping': + '[{"virtual_device": "fake", "device_name": "/dev/fake"}, ' + '{"virtual_device": "ephemeral0", ' + '"device_name": "/dev/fake0"}]'}} + converted = glance._convert_to_string(metadata) + self.assertEqual(converted, converted_expected) + self.assertEqual(glance._convert_from_string(converted), metadata) diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py index 231e109f8..f1ceeb7fe 100644 --- a/nova/tests/image/test_s3.py +++ b/nova/tests/image/test_s3.py @@ -16,12 +16,9 @@ # under the License. from nova import context -from nova import flags from nova import test from nova.image import s3 -FLAGS = flags.FLAGS - ami_manifest_xml = """<?xml version="1.0" ?> <manifest> @@ -59,15 +56,10 @@ ami_manifest_xml = """<?xml version="1.0" ?> class TestS3ImageService(test.TestCase): def setUp(self): super(TestS3ImageService, self).setUp() - self.orig_image_service = FLAGS.image_service - FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.flags(image_service='nova.image.fake.FakeImageService') self.image_service = s3.S3ImageService() self.context = context.RequestContext(None, None) - def tearDown(self): - super(TestS3ImageService, self).tearDown() - FLAGS.image_service = self.orig_image_service - def _assertEqualList(self, list0, list1, keys): self.assertEqual(len(list0), len(list1)) key = keys[0] diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 47bd8c1e4..fb2f88502 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -23,7 +23,6 @@ import random import string from nova import exception -from nova import flags from nova import service from nova import test # For the flags from nova.auth import manager @@ -32,8 +31,6 @@ from nova.log import logging from nova.tests.integrated.api import client -FLAGS = flags.FLAGS - LOG = logging.getLogger('nova.tests.integrated') @@ -151,6 +148,7 @@ class _IntegratedTestBase(test.TestCase): f = self._get_flags() self.flags(**f) + self.flags(verbose=True) def fake_get_image_service(image_href): image_id = int(str(image_href).split('/')[-1]) diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py index 0d4ee8cab..c22cf0be0 100644 --- a/nova/tests/integrated/test_extensions.py +++ b/nova/tests/integrated/test_extensions.py @@ -17,7 +17,6 @@ import os -from nova import flags from nova.log import logging from nova.tests.integrated import integrated_helpers @@ -25,10 +24,6 @@ from nova.tests.integrated import integrated_helpers LOG = logging.getLogger('nova.tests.integrated') -FLAGS = flags.FLAGS -FLAGS.verbose = True - - class ExtensionsTest(integrated_helpers._IntegratedTestBase): def _get_flags(self): f = super(ExtensionsTest, self)._get_flags() 
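The hunks above and below repeatedly make the same change: module-level `FLAGS.x = value` assignments (with manual save/restore in setUp/tearDown) are replaced by a single `self.flags(...)` call on nova's `test.TestCase`, which restores any overridden flag when the test finishes. The following is only a minimal sketch of how such an override-and-restore helper could be written; the `FakeFlags` object and the `_flag_overrides` attribute are illustrative assumptions, not nova's actual internals.

    import unittest


    class FakeFlags(object):
        """Stand-in for a global FLAGS object: plain attribute storage."""
        verbose = False
        allow_admin_api = False


    FLAGS = FakeFlags()


    class TestCase(unittest.TestCase):
        def setUp(self):
            super(TestCase, self).setUp()
            self._flag_overrides = {}

        def flags(self, **kwargs):
            """Override flags for the duration of one test."""
            for name, value in kwargs.items():
                # Remember the original value only the first time it is touched.
                self._flag_overrides.setdefault(name, getattr(FLAGS, name))
                setattr(FLAGS, name, value)

        def tearDown(self):
            # Restore every overridden flag so tests stay isolated.
            for name, value in self._flag_overrides.items():
                setattr(FLAGS, name, value)
            super(TestCase, self).tearDown()

With a helper along these lines, a test case only needs `self.flags(verbose=True, allow_admin_api=True)` in setUp and can drop the tearDown bookkeeping, which is exactly the simplification these hunks apply across ZonesTest, the integrated tests, and the scheduler tests.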
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py index a5180b6bc..06359a52f 100644 --- a/nova/tests/integrated/test_login.py +++ b/nova/tests/integrated/test_login.py @@ -17,7 +17,6 @@ import unittest -from nova import flags from nova.log import logging from nova.tests.integrated import integrated_helpers from nova.tests.integrated.api import client @@ -25,9 +24,6 @@ from nova.tests.integrated.api import client LOG = logging.getLogger('nova.tests.integrated') -FLAGS = flags.FLAGS -FLAGS.verbose = True - class LoginTest(integrated_helpers._IntegratedTestBase): def test_login(self): diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 67b3c485a..150279a95 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -18,7 +18,6 @@ import time import unittest -from nova import flags from nova.log import logging from nova.tests.integrated import integrated_helpers from nova.tests.integrated.api import client @@ -27,10 +26,6 @@ from nova.tests.integrated.api import client LOG = logging.getLogger('nova.tests.integrated') -FLAGS = flags.FLAGS -FLAGS.verbose = True - - class ServersTest(integrated_helpers._IntegratedTestBase): def test_get_servers(self): """Simple check that listing servers works.""" diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py index e9fb3c4d1..d3e936462 100644 --- a/nova/tests/integrated/test_volumes.py +++ b/nova/tests/integrated/test_volumes.py @@ -18,7 +18,6 @@ import unittest import time -from nova import flags from nova.log import logging from nova.tests.integrated import integrated_helpers from nova.tests.integrated.api import client @@ -28,10 +27,6 @@ from nova.volume import driver LOG = logging.getLogger('nova.tests.integrated') -FLAGS = flags.FLAGS -FLAGS.verbose = True - - class VolumesTest(integrated_helpers._IntegratedTestBase): def setUp(self): super(VolumesTest, self).setUp() diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py index fde32f797..74baaacc2 100644 --- a/nova/tests/integrated/test_xml.py +++ b/nova/tests/integrated/test_xml.py @@ -15,7 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import flags from nova.log import logging from nova.tests.integrated import integrated_helpers from nova.api.openstack import common @@ -24,10 +23,6 @@ from nova.api.openstack import common LOG = logging.getLogger('nova.tests.integrated') -FLAGS = flags.FLAGS -FLAGS.verbose = True - - class XmlTests(integrated_helpers._IntegratedTestBase): """"Some basic XML sanity checks.""" diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index b1892dab4..7e664d3f9 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -19,12 +19,9 @@ Tests For Scheduler Host Filters. 
import json from nova import exception -from nova import flags from nova import test from nova.scheduler import host_filter -FLAGS = flags.FLAGS - class FakeZoneManager: pass @@ -57,9 +54,9 @@ class HostFilterTestCase(test.TestCase): 'host_name-label': 'xs-%s' % multiplier} def setUp(self): - self.old_flag = FLAGS.default_host_filter - FLAGS.default_host_filter = \ - 'nova.scheduler.host_filter.AllHostsFilter' + super(HostFilterTestCase, self).setUp() + default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' + self.flags(default_host_filter=default_host_filter) self.instance_type = dict(name='tiny', memory_mb=50, vcpus=10, @@ -98,9 +95,6 @@ class HostFilterTestCase(test.TestCase): host09['xpu_arch'] = 'fermi' host09['xpu_info'] = 'Tesla 2150' - def tearDown(self): - FLAGS.default_host_filter = self.old_flag - def test_choose_filter(self): # Test default filter ... hf = host_filter.choose_host_filter() diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index 49791053e..fbe6b2f77 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -16,13 +16,11 @@ Tests For Least Cost Scheduler """ -from nova import flags from nova import test from nova.scheduler import least_cost from nova.tests.scheduler import test_zone_aware_scheduler MB = 1024 * 1024 -FLAGS = flags.FLAGS class FakeHost(object): @@ -95,10 +93,9 @@ class LeastCostSchedulerTestCase(test.TestCase): self.assertWeights(expected, num, request_spec, hosts) def test_noop_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn', - ] - FLAGS.noop_cost_fn_weight = 1 + self.flags(least_cost_scheduler_cost_functions=[ + 'nova.scheduler.least_cost.noop_cost_fn'], + noop_cost_fn_weight=1) num = 1 request_spec = {} @@ -109,10 +106,9 @@ class LeastCostSchedulerTestCase(test.TestCase): self.assertWeights(expected, num, request_spec, hosts) def test_cost_fn_weights(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.noop_cost_fn', - ] - FLAGS.noop_cost_fn_weight = 2 + self.flags(least_cost_scheduler_cost_functions=[ + 'nova.scheduler.least_cost.noop_cost_fn'], + noop_cost_fn_weight=2) num = 1 request_spec = {} @@ -123,10 +119,9 @@ class LeastCostSchedulerTestCase(test.TestCase): self.assertWeights(expected, num, request_spec, hosts) def test_compute_fill_first_cost_fn(self): - FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.compute_fill_first_cost_fn', - ] - FLAGS.compute_fill_first_cost_fn_weight = 1 + self.flags(least_cost_scheduler_cost_functions=[ + 'nova.scheduler.least_cost.compute_fill_first_cost_fn'], + compute_fill_first_cost_fn_weight=1) num = 1 instance_type = {'memory_mb': 1024} diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index daea826fd..330dab5e5 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -21,9 +21,10 @@ Tests For Scheduler import datetime import mox -import novaclient.exceptions import stubout -import webob + +from novaclient import v1_1 as novaclient +from novaclient import exceptions as novaclient_exceptions from mox import IgnoreArg from nova import context @@ -34,12 +35,10 @@ from nova import service from nova import test from nova import rpc from nova import utils -from nova.auth import manager as auth_manager from nova.scheduler import api from nova.scheduler import manager from 
nova.scheduler import driver from nova.compute import power_state -from nova.db.sqlalchemy import models FLAGS = flags.FLAGS @@ -250,23 +249,17 @@ class SimpleDriverTestCase(test.TestCase): volume_driver='nova.volume.driver.FakeISCSIDriver', scheduler_driver='nova.scheduler.simple.SimpleScheduler') self.scheduler = manager.SchedulerManager() - self.manager = auth_manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') self.context = context.get_admin_context() - - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(SimpleDriverTestCase, self).tearDown() + self.user_id = 'fake' + self.project_id = 'fake' def _create_instance(self, **kwargs): """Create a test instance""" inst = {} inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id + inst['user_id'] = self.user_id + inst['project_id'] = self.project_id inst['instance_type_id'] = '1' inst['vcpus'] = kwargs.get('vcpus', 1) inst['ami_launch_index'] = 0 @@ -485,11 +478,6 @@ class SimpleDriverTestCase(test.TestCase): self.assertEqual(host, 'host2') volume1.delete_volume(self.context, volume_id1) db.volume_destroy(self.context, volume_id2) - dic = {'service_id': s_ref['id'], - 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100, - 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10, - 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, - 'cpu_info': ''} def test_doesnt_report_disabled_hosts_as_up(self): """Ensures driver doesn't find hosts before they are enabled""" @@ -976,13 +964,10 @@ class ZoneRedirectTest(test.TestCase): self.stubs.Set(db, 'zone_get_all', zone_get_all) self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid) - - self.enable_zone_routing = FLAGS.enable_zone_routing - FLAGS.enable_zone_routing = True + self.flags(enable_zone_routing=True) def tearDown(self): self.stubs.UnsetAll() - FLAGS.enable_zone_routing = self.enable_zone_routing super(ZoneRedirectTest, self).tearDown() def test_trap_found_locally(self): @@ -1012,7 +997,7 @@ class ZoneRedirectTest(test.TestCase): self.assertEquals(e.results['magic'], 'found me') def test_routing_flags(self): - FLAGS.enable_zone_routing = False + self.flags(enable_zone_routing=False) decorator = FakeRerouteCompute("foo") self.assertRaises(exception.InstanceNotFound, decorator(go_boom), None, None, 1) @@ -1053,10 +1038,10 @@ class FakeServerCollection(object): class FakeEmptyServerCollection(object): def get(self, f): - raise novaclient.NotFound(1) + raise novaclient_exceptions.NotFound(1) def find(self, name): - raise novaclient.NotFound(2) + raise novaclient_exceptions.NotFound(2) class FakeNovaClient(object): @@ -1102,7 +1087,7 @@ class FakeZonesProxy(object): raise Exception('testing') -class FakeNovaClientOpenStack(object): +class FakeNovaClientZones(object): def __init__(self, *args, **kwargs): self.zones = FakeZonesProxy() @@ -1115,7 +1100,7 @@ class CallZoneMethodTest(test.TestCase): super(CallZoneMethodTest, self).setUp() self.stubs = stubout.StubOutForTesting() self.stubs.Set(db, 'zone_get_all', zone_get_all) - self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + self.stubs.Set(novaclient, 'Client', FakeNovaClientZones) def tearDown(self): self.stubs.UnsetAll() diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index d74b71fb6..788efca52 100644 --- 
a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -16,10 +16,14 @@ Tests For Zone Aware Scheduler. """ +import json + import nova.db from nova import exception +from nova import rpc from nova import test +from nova.compute import api as compute_api from nova.scheduler import driver from nova.scheduler import zone_aware_scheduler from nova.scheduler import zone_manager @@ -112,7 +116,7 @@ def fake_provision_resource_from_blob(context, item, instance_id, def fake_decrypt_blob_returns_local_info(blob): - return {'foo': True} # values aren't important. + return {'hostname': 'foooooo'} # values aren't important. def fake_decrypt_blob_returns_child_info(blob): @@ -281,14 +285,29 @@ class ZoneAwareSchedulerTestCase(test.TestCase): global was_called sched = FakeZoneAwareScheduler() was_called = False + + def fake_create_db_entry_for_new_instance(self, context, + image, base_options, security_group, + block_device_mapping, num=1): + global was_called + was_called = True + # return fake instances + return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'} + + def fake_rpc_cast(*args, **kwargs): + pass + self.stubs.Set(sched, '_decrypt_blob', fake_decrypt_blob_returns_local_info) - self.stubs.Set(sched, '_provision_resource_locally', - fake_provision_resource_locally) + self.stubs.Set(compute_api.API, + 'create_db_entry_for_new_instance', + fake_create_db_entry_for_new_instance) + self.stubs.Set(rpc, 'cast', fake_rpc_cast) - request_spec = {'blob': "Non-None blob data"} + build_plan_item = {'blob': "Non-None blob data"} + request_spec = {'image': {}, 'instance_properties': {}} - sched._provision_resource_from_blob(None, request_spec, 1, + sched._provision_resource_from_blob(None, build_plan_item, 1, request_spec, {}) self.assertTrue(was_called) @@ -327,3 +346,19 @@ class ZoneAwareSchedulerTestCase(test.TestCase): sched._provision_resource_from_blob(None, request_spec, 1, request_spec, {}) self.assertTrue(was_called) + + def test_decrypt_blob(self): + """Test that the decrypt method works.""" + + fixture = FakeZoneAwareScheduler() + test_data = {"foo": "bar"} + + class StubDecryptor(object): + def decryptor(self, key): + return lambda blob: blob + + self.stubs.Set(zone_aware_scheduler, 'crypto', + StubDecryptor()) + + self.assertEqual(fixture._decrypt_blob(test_data), + json.dumps(test_data)) diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py index e170ccee6..3b54fc249 100644 --- a/nova/tests/test_access.py +++ b/nova/tests/test_access.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import unittest import webob from nova import context @@ -41,7 +40,7 @@ class FakeApiRequest(object): class AccessTestCase(test.TestCase): def _env_for(self, ctxt, action): env = {} - env['ec2.context'] = ctxt + env['nova.context'] = ctxt env['ec2.request'] = FakeApiRequest(action) return env @@ -93,7 +92,11 @@ class AccessTestCase(test.TestCase): super(AccessTestCase, self).tearDown() def response_status(self, user, methodName): - ctxt = context.RequestContext(user, self.project) + roles = manager.AuthManager().get_active_roles(user, self.project) + ctxt = context.RequestContext(user.id, + self.project.id, + is_admin=user.is_admin(), + roles=roles) environ = self._env_for(ctxt, methodName) req = webob.Request.blank('/', environ) resp = req.get_response(self.mw) @@ -105,30 +108,26 @@ class AccessTestCase(test.TestCase): def shouldDeny(self, user, methodName): self.assertEqual(401, self.response_status(user, methodName)) - def test_001_allow_all(self): + def test_allow_all(self): users = [self.testadmin, self.testpmsys, self.testnet, self.testsys] for user in users: self.shouldAllow(user, '_allow_all') - def test_002_allow_none(self): + def test_allow_none(self): self.shouldAllow(self.testadmin, '_allow_none') users = [self.testpmsys, self.testnet, self.testsys] for user in users: self.shouldDeny(user, '_allow_none') - def test_003_allow_project_manager(self): + def test_allow_project_manager(self): for user in [self.testadmin, self.testpmsys]: self.shouldAllow(user, '_allow_project_manager') for user in [self.testnet, self.testsys]: self.shouldDeny(user, '_allow_project_manager') - def test_004_allow_sys_and_net(self): + def test_allow_sys_and_net(self): for user in [self.testadmin, self.testnet, self.testsys]: self.shouldAllow(user, '_allow_sys_and_net') # denied because it doesn't have the per project sysadmin for user in [self.testpmsys]: self.shouldDeny(user, '_allow_sys_and_net') - -if __name__ == "__main__": - # TODO: Implement use_fake as an option - unittest.main() diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py index 877cf4ea1..06cc498ac 100644 --- a/nova/tests/test_adminapi.py +++ b/nova/tests/test_adminapi.py @@ -25,7 +25,6 @@ from nova import log as logging from nova import rpc from nova import test from nova import utils -from nova.auth import manager from nova.api.ec2 import admin from nova.image import fake @@ -39,7 +38,7 @@ class AdminApiTestCase(test.TestCase): super(AdminApiTestCase, self).setUp() self.flags(connection_type='fake') - self.conn = rpc.Connection.instance() + self.conn = rpc.create_connection() # set up our cloud self.api = admin.AdminController() @@ -51,11 +50,11 @@ class AdminApiTestCase(test.TestCase): self.volume = self.start_service('volume') self.image_service = utils.import_object(FLAGS.image_service) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('proj', 'admin', 'proj') - self.context = context.RequestContext(user=self.user, - project=self.project) + self.user_id = 'admin' + self.project_id = 'admin' + self.context = context.RequestContext(self.user_id, + self.project_id, + True) def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, @@ -73,11 +72,6 @@ class AdminApiTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(AdminApiTestCase, self).tearDown() - 
def test_block_external_ips(self): """Make sure provider firewall rules are created.""" result = self.api.block_external_addresses(self.context, '1.1.1.1/32') diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index fe7fd8402..2011ae756 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -27,14 +27,15 @@ import random import StringIO import webob +from nova import block_device from nova import context from nova import exception from nova import test +from nova import wsgi from nova.api import ec2 from nova.api.ec2 import apirequest from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils -from nova.auth import manager class FakeHttplibSocket(object): @@ -147,10 +148,12 @@ class Ec2utilsTestCase(test.TestCase): properties0 = {'mappings': mappings} properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings} - root_device_name = ec2utils.properties_root_device_name(properties0) + root_device_name = block_device.properties_root_device_name( + properties0) self.assertEqual(root_device_name, '/dev/sda1') - root_device_name = ec2utils.properties_root_device_name(properties1) + root_device_name = block_device.properties_root_device_name( + properties1) self.assertEqual(root_device_name, '/dev/sdb') def test_mapping_prepend_dev(self): @@ -184,7 +187,7 @@ class Ec2utilsTestCase(test.TestCase): 'device': '/dev/sdc1'}, {'virtual': 'ephemeral1', 'device': '/dev/sdc1'}] - self.assertDictListMatch(ec2utils.mappings_prepend_dev(mappings), + self.assertDictListMatch(block_device.mappings_prepend_dev(mappings), expected_result) @@ -192,10 +195,13 @@ class ApiEc2TestCase(test.TestCase): """Unit test for the cloud controller on an EC2 API""" def setUp(self): super(ApiEc2TestCase, self).setUp() - self.manager = manager.AuthManager() self.host = '127.0.0.1' - self.app = ec2.Authenticate(ec2.Requestify(ec2.Executor(), - 'nova.api.ec2.cloud.CloudController')) + # NOTE(vish): skipping the Authorizer + roles = ['sysadmin', 'netadmin'] + ctxt = context.RequestContext('fake', 'fake', roles=roles) + self.app = wsgi.InjectContext(ctxt, + ec2.Requestify(ec2.Authorizer(ec2.Executor()), + 'nova.api.ec2.cloud.CloudController')) def expect_http(self, host=None, is_secure=False, api_version=None): """Returns a new EC2 connection""" @@ -246,39 +252,25 @@ class ApiEc2TestCase(test.TestCase): self.expect_http(api_version='2010-10-30') self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - # Any request should be fine self.ec2.get_all_instances() self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(), 'The version in the xmlns of the response does ' 'not match the API version given in the request.') - self.manager.delete_project(project) - self.manager.delete_user(user) - def test_describe_instances(self): """Test that, after creating a user and a project, the describe instances call to the API works properly""" self.expect_http() self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') self.assertEqual(self.ec2.get_all_instances(), []) - self.manager.delete_project(project) - self.manager.delete_user(user) def test_terminate_invalid_instance(self): """Attempt to terminate an invalid instance""" self.expect_http() self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') self.assertRaises(EC2ResponseError, 
self.ec2.terminate_instances, "i-00000005") - self.manager.delete_project(project) - self.manager.delete_user(user) def test_get_all_key_pairs(self): """Test that, after creating a user and project and generating @@ -287,16 +279,12 @@ class ApiEc2TestCase(test.TestCase): self.mox.ReplayAll() keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ for x in range(random.randint(4, 8))) - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') # NOTE(vish): create depends on pool, so call helper directly - cloud._gen_key(context.get_admin_context(), user.id, keyname) + cloud._gen_key(context.get_admin_context(), 'fake', keyname) rv = self.ec2.get_all_key_pairs() results = [k for k in rv if k.name == keyname] self.assertEquals(len(results), 1) - self.manager.delete_project(project) - self.manager.delete_user(user) def test_create_duplicate_key_pair(self): """Test that, after successfully generating a keypair, @@ -305,8 +293,6 @@ class ApiEc2TestCase(test.TestCase): self.mox.ReplayAll() keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ for x in range(random.randint(4, 8))) - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') # NOTE(vish): create depends on pool, so call helper directly self.ec2.create_key_pair('test') @@ -325,27 +311,16 @@ class ApiEc2TestCase(test.TestCase): """Test that we can retrieve security groups""" self.expect_http() self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project = self.manager.create_project('fake', 'fake', 'fake') rv = self.ec2.get_all_security_groups() self.assertEquals(len(rv), 1) self.assertEquals(rv[0].name, 'default') - self.manager.delete_project(project) - self.manager.delete_user(user) - def test_create_delete_security_group(self): """Test that we can create a security group""" self.expect_http() self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8))) @@ -364,8 +339,32 @@ class ApiEc2TestCase(test.TestCase): self.ec2.delete_security_group(security_group_name) - self.manager.delete_project(project) - self.manager.delete_user(user) + def test_group_name_valid_chars_security_group(self): + """ Test that we sanely handle invalid security group names. + API Spec states we should only accept alphanumeric characters, + spaces, dashes, and underscores. """ + self.expect_http() + self.mox.ReplayAll() + + # Test block group_name of non alphanumeric characters, spaces, + # dashes, and underscores. + security_group_name = "aa #^% -=99" + + self.assertRaises(EC2ResponseError, self.ec2.create_security_group, + security_group_name, 'test group') + + def test_group_name_valid_length_security_group(self): + """Test that we sanely handle invalid security group names. 
+ API Spec states that the length should not exceed 255 chars """ + self.expect_http() + self.mox.ReplayAll() + + # Test block group_name > 255 chars + security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc") + for x in range(random.randint(256, 266))) + + self.assertRaises(EC2ResponseError, self.ec2.create_security_group, + security_group_name, 'test group') def test_authorize_revoke_security_group_cidr(self): """ @@ -374,12 +373,6 @@ class ApiEc2TestCase(test.TestCase): """ self.expect_http() self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8))) @@ -426,9 +419,6 @@ class ApiEc2TestCase(test.TestCase): self.assertEqual(len(rv), 1) self.assertEqual(rv[0].name, 'default') - self.manager.delete_project(project) - self.manager.delete_user(user) - return def test_authorize_revoke_security_group_cidr_v6(self): @@ -438,12 +428,6 @@ class ApiEc2TestCase(test.TestCase): """ self.expect_http() self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake') - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") for x in range(random.randint(4, 8))) @@ -489,9 +473,6 @@ class ApiEc2TestCase(test.TestCase): self.assertEqual(len(rv), 1) self.assertEqual(rv[0].name, 'default') - self.manager.delete_project(project) - self.manager.delete_user(user) - return def test_authorize_revoke_security_group_foreign_group(self): @@ -501,12 +482,6 @@ class ApiEc2TestCase(test.TestCase): """ self.expect_http() self.mox.ReplayAll() - user = self.manager.create_user('fake', 'fake', 'fake', admin=True) - project = self.manager.create_project('fake', 'fake', 'fake') - - # At the moment, you need both of these to actually be netadmin - self.manager.add_role('fake', 'netadmin') - project.add_role('fake', 'netadmin') rand_string = 'sdiuisudfsdcnpaqwertasd' security_group_name = "".join(random.choice(rand_string) @@ -560,8 +535,3 @@ class ApiEc2TestCase(test.TestCase): self.mox.ReplayAll() self.ec2.delete_security_group(security_group_name) - - self.manager.delete_project(project) - self.manager.delete_user(user) - - return diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 71e0d17c9..2e24b7d6e 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -83,9 +83,9 @@ class user_and_project_generator(object): class _AuthManagerBaseTestCase(test.TestCase): def setUp(self): - FLAGS.auth_driver = self.auth_driver super(_AuthManagerBaseTestCase, self).setUp() - self.flags(connection_type='fake') + self.flags(auth_driver=self.auth_driver, + connection_type='fake') self.manager = manager.AuthManager(new=True) self.manager.mc.cache = {} @@ -102,7 +102,7 @@ class _AuthManagerBaseTestCase(test.TestCase): self.assertEqual('classified', u.secret) self.assertEqual('private-party', u.access) - def test_004_signature_is_valid(self): + def test_signature_is_valid(self): with user_generator(self.manager, name='admin', secret='admin', access='admin'): with project_generator(self.manager, name="admin", @@ -141,15 
+141,14 @@ class _AuthManagerBaseTestCase(test.TestCase): '127.0.0.1', '/services/Cloud')) - def test_005_can_get_credentials(self): - return - credentials = self.manager.get_user('test1').get_credentials() - self.assertEqual(credentials, - 'export EC2_ACCESS_KEY="access"\n' + - 'export EC2_SECRET_KEY="secret"\n' + - 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' + - 'export S3_URL="http://127.0.0.1:3333/"\n' + - 'export EC2_USER_ID="test1"\n') + def test_can_get_credentials(self): + st = {'access': 'access', 'secret': 'secret'} + with user_and_project_generator(self.manager, user_state=st) as (u, p): + credentials = self.manager.get_environment_rc(u, p) + LOG.debug(credentials) + self.assertTrue('export EC2_ACCESS_KEY="access:testproj"\n' + in credentials) + self.assertTrue('export EC2_SECRET_KEY="secret"\n' in credentials) def test_can_list_users(self): with user_generator(self.manager): diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py new file mode 100644 index 000000000..b8e9b35e2 --- /dev/null +++ b/nova/tests/test_block_device.py @@ -0,0 +1,87 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests for Block Device utility functions. 
+""" + +from nova import block_device +from nova import test + + +class BlockDeviceTestCase(test.TestCase): + def test_properties(self): + root_device0 = '/dev/sda' + root_device1 = '/dev/sdb' + mappings = [{'virtual': 'root', + 'device': root_device0}] + + properties0 = {'mappings': mappings} + properties1 = {'mappings': mappings, + 'root_device_name': root_device1} + + self.assertEqual(block_device.properties_root_device_name({}), None) + self.assertEqual( + block_device.properties_root_device_name(properties0), + root_device0) + self.assertEqual( + block_device.properties_root_device_name(properties1), + root_device1) + + def test_ephemeral(self): + self.assertFalse(block_device.is_ephemeral('ephemeral')) + self.assertTrue(block_device.is_ephemeral('ephemeral0')) + self.assertTrue(block_device.is_ephemeral('ephemeral1')) + self.assertTrue(block_device.is_ephemeral('ephemeral11')) + self.assertFalse(block_device.is_ephemeral('root')) + self.assertFalse(block_device.is_ephemeral('swap')) + self.assertFalse(block_device.is_ephemeral('/dev/sda1')) + + self.assertEqual(block_device.ephemeral_num('ephemeral0'), 0) + self.assertEqual(block_device.ephemeral_num('ephemeral1'), 1) + self.assertEqual(block_device.ephemeral_num('ephemeral11'), 11) + + self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral')) + self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0')) + self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1')) + self.assertTrue(block_device.is_swap_or_ephemeral('swap')) + self.assertFalse(block_device.is_swap_or_ephemeral('root')) + self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1')) + + def test_mappings_prepend_dev(self): + mapping = [ + {'virtual': 'ami', 'device': '/dev/sda'}, + {'virtual': 'root', 'device': 'sda'}, + {'virtual': 'ephemeral0', 'device': 'sdb'}, + {'virtual': 'swap', 'device': 'sdc'}, + {'virtual': 'ephemeral1', 'device': 'sdd'}, + {'virtual': 'ephemeral2', 'device': 'sde'}] + + expected = [ + {'virtual': 'ami', 'device': '/dev/sda'}, + {'virtual': 'root', 'device': 'sda'}, + {'virtual': 'ephemeral0', 'device': '/dev/sdb'}, + {'virtual': 'swap', 'device': '/dev/sdc'}, + {'virtual': 'ephemeral1', 'device': '/dev/sdd'}, + {'virtual': 'ephemeral2', 'device': '/dev/sde'}] + + prepended = block_device.mappings_prepend_dev(mapping) + self.assertEqual(prepended.sort(), expected.sort()) + + def test_strip_dev(self): + self.assertEqual(block_device.strip_dev('/dev/sda'), 'sda') + self.assertEqual(block_device.strip_dev('sda'), 'sda') diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index e419e7a50..b2afc53c9 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -17,6 +17,8 @@ # under the License. 
import mox +import functools + from base64 import b64decode from M2Crypto import BIO from M2Crypto import RSA @@ -34,7 +36,6 @@ from nova import network from nova import rpc from nova import test from nova import utils -from nova.auth import manager from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import fake @@ -50,7 +51,7 @@ class CloudTestCase(test.TestCase): self.flags(connection_type='fake', stub_network=True) - self.conn = rpc.Connection.instance() + self.conn = rpc.create_connection() # set up our cloud self.cloud = cloud.CloudController() @@ -62,12 +63,11 @@ class CloudTestCase(test.TestCase): self.volume = self.start_service('volume') self.image_service = utils.import_object(FLAGS.image_service) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('proj', 'admin', 'proj') - self.context = context.RequestContext(user=self.user, - project=self.project) - host = self.network.host + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, + self.project_id, + True) def fake_show(meh, context, id): return {'id': 1, 'container_format': 'ami', @@ -87,27 +87,23 @@ class CloudTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) def tearDown(self): - networks = db.project_get_networks(self.context, self.project.id, + networks = db.project_get_networks(self.context, self.project_id, associate=False) for network in networks: db.network_disassociate(self.context, network['id']) - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) super(CloudTestCase, self).tearDown() def _create_key(self, name): # NOTE(vish): create depends on pool, so just call helper directly - return cloud._gen_key(self.context, self.context.user.id, name) + return cloud._gen_key(self.context, self.context.user_id, name) def test_describe_regions(self): """Makes sure describe regions runs without raising an exception""" result = self.cloud.describe_regions(self.context) self.assertEqual(len(result['regionInfo']), 1) - regions = FLAGS.region_list - FLAGS.region_list = ["one=test_host1", "two=test_host2"] + self.flags(region_list=["one=test_host1", "two=test_host2"]) result = self.cloud.describe_regions(self.context) self.assertEqual(len(result['regionInfo']), 2) - FLAGS.region_list = regions def test_describe_addresses(self): """Makes sure describe addresses runs without raising an exception""" @@ -326,22 +322,15 @@ class CloudTestCase(test.TestCase): revoke = self.cloud.revoke_security_group_ingress self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs)) - def test_revoke_security_group_ingress_by_id(self): - kwargs = {'project_id': self.context.project_id, 'name': 'test'} - sec = db.security_group_create(self.context, kwargs) - authz = self.cloud.authorize_security_group_ingress - kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - authz(self.context, group_id=sec['id'], **kwargs) - revoke = self.cloud.revoke_security_group_ingress - self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs)) - - def test_authorize_security_group_ingress_by_id(self): + def test_authorize_revoke_security_group_ingress_by_id(self): sec = db.security_group_create(self.context, {'project_id': self.context.project_id, 'name': 'test'}) authz = self.cloud.authorize_security_group_ingress kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} - 
self.assertTrue(authz(self.context, group_id=sec['id'], **kwargs)) + authz(self.context, group_id=sec['id'], **kwargs) + revoke = self.cloud.revoke_security_group_ingress + self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs)) def test_authorize_security_group_ingress_missing_protocol_params(self): sec = db.security_group_create(self.context, @@ -905,13 +894,16 @@ class CloudTestCase(test.TestCase): def test_modify_image_attribute(self): modify_image_attribute = self.cloud.modify_image_attribute + fake_metadata = {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'is_public': False} + def fake_show(meh, context, id): - return {'id': 1, 'container_format': 'ami', - 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'is_public': False} + return fake_metadata def fake_update(meh, context, image_id, metadata, data=None): - return metadata + fake_metadata.update(metadata) + return fake_metadata self.stubs.Set(fake._FakeImageService, 'show', fake_show) self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) @@ -961,21 +953,6 @@ class CloudTestCase(test.TestCase): self._wait_for_running(ec2_instance_id) return ec2_instance_id - def test_rescue_unrescue_instance(self): - instance_id = self._run_instance( - image_id='ami-1', - instance_type=FLAGS.default_instance_type, - max_count=1) - self.cloud.rescue_instance(context=self.context, - instance_id=instance_id) - # NOTE(vish): This currently does no validation, it simply makes sure - # that the code path doesn't throw an exception. - self.cloud.unrescue_instance(context=self.context, - instance_id=instance_id) - # TODO(soren): We need this until we can stop polling in the rpc code - # for unit tests. - self.cloud.terminate_instances(self.context, [instance_id]) - def test_console_output(self): instance_id = self._run_instance( image_id='ami-1', @@ -1004,7 +981,7 @@ class CloudTestCase(test.TestCase): key = RSA.load_key_string(private_key, callback=lambda: None) bio = BIO.MemoryBuffer() public_key = db.key_pair_get(self.context, - self.context.user.id, + self.context.user_id, 'test')['public_key'] key.save_pub_key_bio(bio) converted = crypto.ssl_pub_to_ssh_pub(bio.read()) @@ -1028,7 +1005,7 @@ class CloudTestCase(test.TestCase): 'mytestfprint') self.assertTrue(result1) keydata = db.key_pair_get(self.context, - self.context.user.id, + self.context.user_id, 'testimportkey1') self.assertEqual('mytestpubkey', keydata['public_key']) self.assertEqual('mytestfprint', keydata['fingerprint']) @@ -1045,7 +1022,7 @@ class CloudTestCase(test.TestCase): dummypub) self.assertTrue(result2) keydata = db.key_pair_get(self.context, - self.context.user.id, + self.context.user_id, 'testimportkey2') self.assertEqual(dummypub, keydata['public_key']) self.assertEqual(dummyfprint, keydata['fingerprint']) @@ -1492,3 +1469,147 @@ class CloudTestCase(test.TestCase): # TODO(yamahata): clean up snapshot created by CreateImage. 
self._restart_compute_service() + + @staticmethod + def _fake_bdm_get(ctxt, id): + return [{'volume_id': 87654321, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': None, + 'delete_on_termination': True, + 'device_name': '/dev/sdh'}, + {'volume_id': None, + 'snapshot_id': 98765432, + 'no_device': None, + 'virtual_name': None, + 'delete_on_termination': True, + 'device_name': '/dev/sdi'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': True, + 'virtual_name': None, + 'delete_on_termination': None, + 'device_name': None}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral0', + 'delete_on_termination': None, + 'device_name': '/dev/sdb'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'swap', + 'delete_on_termination': None, + 'device_name': '/dev/sdc'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral1', + 'delete_on_termination': None, + 'device_name': '/dev/sdd'}, + {'volume_id': None, + 'snapshot_id': None, + 'no_device': None, + 'virtual_name': 'ephemeral2', + 'delete_on_termination': None, + 'device_name': '/dev/sd3'}, + ] + + def test_get_instance_mapping(self): + """Make sure that _get_instance_mapping works""" + ctxt = None + instance_ref0 = {'id': 0, + 'root_device_name': None} + instance_ref1 = {'id': 0, + 'root_device_name': '/dev/sda1'} + + self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', + self._fake_bdm_get) + + expected = {'ami': 'sda1', + 'root': '/dev/sda1', + 'ephemeral0': '/dev/sdb', + 'swap': '/dev/sdc', + 'ephemeral1': '/dev/sdd', + 'ephemeral2': '/dev/sd3'} + + self.assertEqual(self.cloud._format_instance_mapping(ctxt, + instance_ref0), + cloud._DEFAULT_MAPPINGS) + self.assertEqual(self.cloud._format_instance_mapping(ctxt, + instance_ref1), + expected) + + def test_describe_instance_attribute(self): + """Make sure that describe_instance_attribute works""" + self.stubs.Set(db, 'block_device_mapping_get_all_by_instance', + self._fake_bdm_get) + + def fake_get(ctxt, instance_id): + return { + 'id': 0, + 'root_device_name': '/dev/sdh', + 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], + 'state_description': 'stopping', + 'instance_type': {'name': 'fake_type'}, + 'kernel_id': 1, + 'ramdisk_id': 2, + 'user_data': 'fake-user data', + } + self.stubs.Set(self.cloud.compute_api, 'get', fake_get) + + def fake_volume_get(ctxt, volume_id, session=None): + if volume_id == 87654321: + return {'id': volume_id, + 'attach_time': '13:56:24', + 'status': 'in-use'} + raise exception.VolumeNotFound(volume_id=volume_id) + self.stubs.Set(db.api, 'volume_get', fake_volume_get) + + get_attribute = functools.partial( + self.cloud.describe_instance_attribute, + self.context, 'i-12345678') + + bdm = get_attribute('blockDeviceMapping') + bdm['blockDeviceMapping'].sort() + + expected_bdm = {'instance_id': 'i-12345678', + 'rootDeviceType': 'ebs', + 'blockDeviceMapping': [ + {'deviceName': '/dev/sdh', + 'ebs': {'status': 'in-use', + 'deleteOnTermination': True, + 'volumeId': 87654321, + 'attachTime': '13:56:24'}}]} + expected_bdm['blockDeviceMapping'].sort() + self.assertEqual(bdm, expected_bdm) + # NOTE(yamahata): this isn't supported + # get_attribute('disableApiTermination') + groupSet = get_attribute('groupSet') + groupSet['groupSet'].sort() + expected_groupSet = {'instance_id': 'i-12345678', + 'groupSet': [{'groupId': 'fake0'}, + {'groupId': 'fake1'}]} + expected_groupSet['groupSet'].sort() + self.assertEqual(groupSet, 
expected_groupSet) + self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), + {'instance_id': 'i-12345678', + 'instanceInitiatedShutdownBehavior': 'stop'}) + self.assertEqual(get_attribute('instanceType'), + {'instance_id': 'i-12345678', + 'instanceType': 'fake_type'}) + self.assertEqual(get_attribute('kernel'), + {'instance_id': 'i-12345678', + 'kernel': 'aki-00000001'}) + self.assertEqual(get_attribute('ramdisk'), + {'instance_id': 'i-12345678', + 'ramdisk': 'ari-00000002'}) + self.assertEqual(get_attribute('rootDeviceName'), + {'instance_id': 'i-12345678', + 'rootDeviceName': '/dev/sdh'}) + # NOTE(yamahata): this isn't supported + # get_attribute('sourceDestCheck') + self.assertEqual(get_attribute('userData'), + {'instance_id': 'i-12345678', + 'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'}) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 0ede4f469..d78b30a3d 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -19,10 +19,6 @@ Tests For Compute """ -import mox -import stubout - -from nova.auth import manager from nova import compute from nova.compute import instance_types from nova.compute import manager as compute_manager @@ -67,10 +63,9 @@ class ComputeTestCase(test.TestCase): network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) self.compute_api = compute.API() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext('fake', 'fake', False) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) test_notifier.NOTIFICATIONS = [] def fake_show(meh, context, id): @@ -78,19 +73,14 @@ class ComputeTestCase(test.TestCase): self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show) - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(ComputeTestCase, self).tearDown() - def _create_instance(self, params={}): """Create a test instance""" inst = {} inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id + inst['user_id'] = self.user_id + inst['project_id'] = self.project_id type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] inst['instance_type_id'] = type_id inst['ami_launch_index'] = 0 @@ -115,8 +105,8 @@ class ComputeTestCase(test.TestCase): def _create_group(self): values = {'name': 'testgroup', 'description': 'testgroup', - 'user_id': self.user.id, - 'project_id': self.project.id} + 'user_id': self.user_id, + 'project_id': self.project_id} return db.security_group_create(self.context, values) def _get_dummy_instance(self): @@ -350,8 +340,8 @@ class ComputeTestCase(test.TestCase): self.assertEquals(msg['priority'], 'INFO') self.assertEquals(msg['event_type'], 'compute.instance.create') payload = msg['payload'] - self.assertEquals(payload['tenant_id'], self.project.id) - self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['tenant_id'], self.project_id) + self.assertEquals(payload['user_id'], self.user_id) self.assertEquals(payload['instance_id'], instance_id) self.assertEquals(payload['instance_type'], 'm1.tiny') type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] @@ -374,8 +364,8 @@ class ComputeTestCase(test.TestCase): 
self.assertEquals(msg['priority'], 'INFO') self.assertEquals(msg['event_type'], 'compute.instance.delete') payload = msg['payload'] - self.assertEquals(payload['tenant_id'], self.project.id) - self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['tenant_id'], self.project_id) + self.assertEquals(payload['user_id'], self.user_id) self.assertEquals(payload['instance_id'], instance_id) self.assertEquals(payload['instance_type'], 'm1.tiny') type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] @@ -420,7 +410,7 @@ class ComputeTestCase(test.TestCase): def fake(*args, **kwargs): pass - self.stubs.Set(self.compute.driver, 'finish_resize', fake) + self.stubs.Set(self.compute.driver, 'finish_migration', fake) self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake) context = self.context.elevated() instance_id = self._create_instance() @@ -457,8 +447,8 @@ class ComputeTestCase(test.TestCase): self.assertEquals(msg['priority'], 'INFO') self.assertEquals(msg['event_type'], 'compute.instance.resize.prep') payload = msg['payload'] - self.assertEquals(payload['tenant_id'], self.project.id) - self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['tenant_id'], self.project_id) + self.assertEquals(payload['user_id'], self.user_id) self.assertEquals(payload['instance_id'], instance_id) self.assertEquals(payload['instance_type'], 'm1.tiny') type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] @@ -506,8 +496,8 @@ class ComputeTestCase(test.TestCase): db.instance_update(self.context, instance_id, {'instance_type_id': inst_type['id']}) - self.assertRaises(exception.ApiError, self.compute_api.resize, - context, instance_id, 1) + self.assertRaises(exception.CannotResizeToSmallerSize, + self.compute_api.resize, context, instance_id, 1) self.compute.terminate_instance(context, instance_id) @@ -518,8 +508,8 @@ class ComputeTestCase(test.TestCase): self.compute.run_instance(self.context, instance_id) - self.assertRaises(exception.ApiError, self.compute_api.resize, - context, instance_id, 1) + self.assertRaises(exception.CannotResizeToSameSize, + self.compute_api.resize, context, instance_id, 1) self.compute.terminate_instance(context, instance_id) @@ -531,8 +521,8 @@ class ComputeTestCase(test.TestCase): def fake(*args, **kwargs): pass - self.stubs.Set(self.compute.driver, 'finish_resize', fake) - self.stubs.Set(self.compute.driver, 'revert_resize', fake) + self.stubs.Set(self.compute.driver, 'finish_migration', fake) + self.stubs.Set(self.compute.driver, 'revert_migration', fake) self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake) self.compute.run_instance(self.context, instance_id) @@ -545,7 +535,9 @@ class ComputeTestCase(test.TestCase): db.instance_update(self.context, instance_id, {'host': 'foo'}) - self.compute.prep_resize(context, inst_ref['uuid'], 3) + new_instance_type_ref = db.instance_type_get_by_flavor_id(context, 3) + self.compute.prep_resize(context, inst_ref['uuid'], + new_instance_type_ref['id']) migration_ref = db.migration_get_by_instance_and_status(context, inst_ref['uuid'], 'pre-migrating') @@ -849,7 +841,6 @@ class ComputeTestCase(test.TestCase): def test_run_kill_vm(self): """Detect when a vm is terminated behind the scenes""" - self.stubs = stubout.StubOutForTesting() self.stubs.Set(compute_manager.ComputeManager, '_report_driver_status', nop_report_driver_status) @@ -886,15 +877,17 @@ class ComputeTestCase(test.TestCase): return bdm def test_update_block_device_mapping(self): + 
swap_size = 1 + instance_type = {'swap': swap_size} instance_id = self._create_instance() mappings = [ {'virtual': 'ami', 'device': 'sda1'}, {'virtual': 'root', 'device': '/dev/sda1'}, - {'virtual': 'swap', 'device': 'sdb1'}, - {'virtual': 'swap', 'device': 'sdb2'}, - {'virtual': 'swap', 'device': 'sdb3'}, {'virtual': 'swap', 'device': 'sdb4'}, + {'virtual': 'swap', 'device': 'sdb3'}, + {'virtual': 'swap', 'device': 'sdb2'}, + {'virtual': 'swap', 'device': 'sdb1'}, {'virtual': 'ephemeral0', 'device': 'sdc1'}, {'virtual': 'ephemeral1', 'device': 'sdc2'}, @@ -936,32 +929,36 @@ class ComputeTestCase(test.TestCase): 'no_device': True}] self.compute_api._update_image_block_device_mapping( - self.context, instance_id, mappings) + self.context, instance_type, instance_id, mappings) bdms = [self._parse_db_block_device_mapping(bdm_ref) for bdm_ref in db.block_device_mapping_get_all_by_instance( self.context, instance_id)] expected_result = [ - {'virtual_name': 'swap', 'device_name': '/dev/sdb1'}, - {'virtual_name': 'swap', 'device_name': '/dev/sdb2'}, - {'virtual_name': 'swap', 'device_name': '/dev/sdb3'}, - {'virtual_name': 'swap', 'device_name': '/dev/sdb4'}, + {'virtual_name': 'swap', 'device_name': '/dev/sdb1', + 'volume_size': swap_size}, {'virtual_name': 'ephemeral0', 'device_name': '/dev/sdc1'}, - {'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'}, - {'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'}] + + # NOTE(yamahata): ATM only ephemeral0 is supported. + # they're ignored for now + #{'virtual_name': 'ephemeral1', 'device_name': '/dev/sdc2'}, + #{'virtual_name': 'ephemeral2', 'device_name': '/dev/sdc3'} + ] bdms.sort() expected_result.sort() self.assertDictListMatch(bdms, expected_result) self.compute_api._update_block_device_mapping( - self.context, instance_id, block_device_mapping) + self.context, instance_types.get_default_instance_type(), + instance_id, block_device_mapping) bdms = [self._parse_db_block_device_mapping(bdm_ref) for bdm_ref in db.block_device_mapping_get_all_by_instance( self.context, instance_id)] expected_result = [ {'snapshot_id': 0x12345678, 'device_name': '/dev/sda1'}, - {'virtual_name': 'swap', 'device_name': '/dev/sdb1'}, + {'virtual_name': 'swap', 'device_name': '/dev/sdb1', + 'volume_size': swap_size}, {'snapshot_id': 0x23456789, 'device_name': '/dev/sdb2'}, {'snapshot_id': 0x3456789A, 'device_name': '/dev/sdb3'}, {'no_device': True, 'device_name': '/dev/sdb4'}, @@ -983,3 +980,13 @@ class ComputeTestCase(test.TestCase): self.context, instance_id): db.block_device_mapping_destroy(self.context, bdm['id']) self.compute.terminate_instance(self.context, instance_id) + + def test_ephemeral_size(self): + local_size = 2 + inst_type = {'local_gb': local_size} + self.assertEqual(self.compute_api._ephemeral_size(inst_type, + 'ephemeral0'), + local_size) + self.assertEqual(self.compute_api._ephemeral_size(inst_type, + 'ephemeral1'), + 0) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 1806cc1ea..cf7f592cf 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -26,10 +26,9 @@ from nova import exception from nova import flags from nova import test from nova import utils -from nova.auth import manager -from nova.console import manager as console_manager FLAGS = flags.FLAGS +flags.DECLARE('console_driver', 'nova.console.manager') class ConsoleTestCase(test.TestCase): @@ -39,17 +38,11 @@ class ConsoleTestCase(test.TestCase): self.flags(console_driver='nova.console.fake.FakeConsoleProxy', stub_compute=True) 
self.console = utils.import_object(FLAGS.console_manager) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake') - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.get_admin_context() + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) self.host = 'test_compute_host' - def tearDown(self): - self.manager.delete_user(self.user) - self.manager.delete_project(self.project) - super(ConsoleTestCase, self).tearDown() - def _create_instance(self): """Create a test instance""" inst = {} @@ -58,8 +51,8 @@ class ConsoleTestCase(test.TestCase): inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id + inst['user_id'] = self.user_id + inst['project_id'] = self.project_id inst['instance_type_id'] = 1 inst['ami_launch_index'] = 0 return db.instance_create(self.context, inst)['id'] diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 107fd03e3..0c07cbb7c 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -22,7 +22,6 @@ from nova import test from nova import context from nova import db from nova import flags -from nova.auth import manager FLAGS = flags.FLAGS @@ -45,42 +44,35 @@ def _setup_networking(instance_id, ip='1.2.3.4', flo_addr='1.2.1.2'): db.fixed_ip_create(ctxt, fixed_ip) fix_ref = db.fixed_ip_get_by_address(ctxt, ip) db.floating_ip_create(ctxt, {'address': flo_addr, - 'fixed_ip_id': fix_ref.id}) + 'fixed_ip_id': fix_ref['id']}) class DbApiTestCase(test.TestCase): def setUp(self): super(DbApiTestCase, self).setUp() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('proj', 'admin', 'proj') - self.context = context.RequestContext(user=self.user, - project=self.project) - - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(DbApiTestCase, self).tearDown() + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) def test_instance_get_project_vpn(self): - result = db.fixed_ip_get_all(self.context) values = {'instance_type_id': FLAGS.default_instance_type, 'image_ref': FLAGS.vpn_image_id, - 'project_id': self.project.id + 'project_id': self.project_id, } instance = db.instance_create(self.context, values) - result = db.instance_get_project_vpn(self.context, self.project.id) - self.assertEqual(instance.id, result.id) + result = db.instance_get_project_vpn(self.context.elevated(), + self.project_id) + self.assertEqual(instance['id'], result['id']) def test_instance_get_project_vpn_joins(self): - result = db.fixed_ip_get_all(self.context) values = {'instance_type_id': FLAGS.default_instance_type, 'image_ref': FLAGS.vpn_image_id, - 'project_id': self.project.id + 'project_id': self.project_id, } instance = db.instance_create(self.context, values) - _setup_networking(instance.id) - result = db.instance_get_project_vpn(self.context, self.project.id) - self.assertEqual(instance.id, result.id) + _setup_networking(instance['id']) + result = db.instance_get_project_vpn(self.context.elevated(), + self.project_id) + self.assertEqual(instance['id'], result['id']) self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address, '1.2.1.2') diff --git a/nova/tests/test_host_filter.py 
b/nova/tests/test_host_filter.py index 438f3e522..3a1389a49 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -19,12 +19,9 @@ Tests For Scheduler Host Filters. import json from nova import exception -from nova import flags from nova import test from nova.scheduler import host_filter -FLAGS = flags.FLAGS - class FakeZoneManager: pass @@ -57,9 +54,9 @@ class HostFilterTestCase(test.TestCase): 'host_name-label': 'xs-%s' % multiplier} def setUp(self): - self.old_flag = FLAGS.default_host_filter - FLAGS.default_host_filter = \ - 'nova.scheduler.host_filter.AllHostsFilter' + super(HostFilterTestCase, self).setUp() + default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' + self.flags(default_host_filter=default_host_filter) self.instance_type = dict(name='tiny', memory_mb=50, vcpus=10, @@ -76,9 +73,6 @@ class HostFilterTestCase(test.TestCase): states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} self.zone_manager.service_states = states - def tearDown(self): - FLAGS.default_host_filter = self.old_flag - def test_choose_filter(self): # Test default filter ... hf = host_filter.choose_host_filter() diff --git a/nova/tests/test_hosts.py b/nova/tests/test_hosts.py index 548f81f8b..a724db9da 100644 --- a/nova/tests/test_hosts.py +++ b/nova/tests/test_hosts.py @@ -48,6 +48,10 @@ def stub_set_host_enabled(context, host, enabled): return status +def stub_host_power_action(context, host, action): + return action + + class FakeRequest(object): environ = {"nova.context": context.get_admin_context()} @@ -62,6 +66,8 @@ class HostTestCase(test.TestCase): self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list) self.stubs.Set(self.controller.compute_api, 'set_host_enabled', stub_set_host_enabled) + self.stubs.Set(self.controller.compute_api, 'host_power_action', + stub_host_power_action) def test_list_hosts(self): """Verify that the compute hosts are returned.""" @@ -87,6 +93,18 @@ class HostTestCase(test.TestCase): result_c2 = self.controller.update(self.req, "host_c2", body=en_body) self.assertEqual(result_c2["status"], "disabled") + def test_host_startup(self): + result = self.controller.startup(self.req, "host_c1") + self.assertEqual(result["power_action"], "startup") + + def test_host_shutdown(self): + result = self.controller.shutdown(self.req, "host_c1") + self.assertEqual(result["power_action"], "shutdown") + + def test_host_reboot(self): + result = self.controller.reboot(self.req, "host_c1") + self.assertEqual(result["power_action"], "reboot") + def test_bad_status_value(self): bad_body = {"status": "bad"} self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, diff --git a/nova/tests/test_image.py b/nova/tests/test_image.py new file mode 100644 index 000000000..9680d6f2b --- /dev/null +++ b/nova/tests/test_image.py @@ -0,0 +1,134 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# Author: Soren Hansen +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
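# ----------------------------------------------------------------------
# [Editorial aside, not part of the patch] The new test_image.py below
# keeps its tests backend-agnostic: _ImageTestCase never instantiates an
# image service itself, and a concrete subclass (FakeImageTestCase at the
# bottom of the file) only binds self.image_service in setUp.  A
# hypothetical subclass for another backend would follow the same shape:
#
#     class OtherImageTestCase(_ImageTestCase):
#         def setUp(self):
#             super(OtherImageTestCase, self).setUp()
#             self.image_service = other_image_module.OtherImageService()
#
# (OtherImageTestCase and other_image_module are illustrative names only.)
# ----------------------------------------------------------------------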
+ +import datetime + +from nova import context +from nova import exception +from nova import test +import nova.image + + +class _ImageTestCase(test.TestCase): + def setUp(self): + super(_ImageTestCase, self).setUp() + self.context = context.get_admin_context() + + def test_index(self): + res = self.image_service.index(self.context) + for image in res: + self.assertEquals(set(image.keys()), set(['id', 'name'])) + + def test_detail(self): + res = self.image_service.detail(self.context) + for image in res: + keys = set(image.keys()) + self.assertEquals(keys, set(['id', 'name', 'created_at', + 'updated_at', 'deleted_at', 'deleted', + 'status', 'is_public', 'properties'])) + self.assertTrue(isinstance(image['created_at'], datetime.datetime)) + self.assertTrue(isinstance(image['updated_at'], datetime.datetime)) + + if not (isinstance(image['deleted_at'], datetime.datetime) or + image['deleted_at'] is None): + self.fail('image\'s "deleted_at" attribute was neither a ' + 'datetime object nor None') + + def check_is_bool(image, key): + val = image.get('deleted') + if not isinstance(val, bool): + self.fail('image\'s "%s" attribute wasn\'t ' + 'a bool: %r' % (key, val)) + + check_is_bool(image, 'deleted') + check_is_bool(image, 'is_public') + + def test_index_and_detail_have_same_results(self): + index = self.image_service.index(self.context) + detail = self.image_service.detail(self.context) + index_set = set([(i['id'], i['name']) for i in index]) + detail_set = set([(i['id'], i['name']) for i in detail]) + self.assertEqual(index_set, detail_set) + + def test_show_raises_imagenotfound_for_invalid_id(self): + self.assertRaises(exception.ImageNotFound, + self.image_service.show, + self.context, + 'this image does not exist') + + def test_show_by_name(self): + self.assertRaises(exception.ImageNotFound, + self.image_service.show_by_name, + self.context, + 'this image does not exist') + + def test_create_adds_id(self): + index = self.image_service.index(self.context) + image_count = len(index) + + self.image_service.create(self.context, {}) + + index = self.image_service.index(self.context) + self.assertEquals(len(index), image_count + 1) + + self.assertTrue(index[0]['id']) + + def test_create_keeps_id(self): + self.image_service.create(self.context, {'id': '34'}) + self.image_service.show(self.context, '34') + + def test_create_rejects_duplicate_ids(self): + self.image_service.create(self.context, {'id': '34'}) + self.assertRaises(exception.Duplicate, + self.image_service.create, + self.context, + {'id': '34'}) + + # Make sure there's still one left + self.image_service.show(self.context, '34') + + def test_update(self): + self.image_service.create(self.context, + {'id': '34', 'foo': 'bar'}) + + self.image_service.update(self.context, '34', + {'id': '34', 'foo': 'baz'}) + + img = self.image_service.show(self.context, '34') + self.assertEquals(img['foo'], 'baz') + + def test_delete(self): + self.image_service.create(self.context, {'id': '34', 'foo': 'bar'}) + self.image_service.delete(self.context, '34') + self.assertRaises(exception.NotFound, + self.image_service.show, + self.context, + '34') + + def test_delete_all(self): + self.image_service.create(self.context, {'id': '32', 'foo': 'bar'}) + self.image_service.create(self.context, {'id': '33', 'foo': 'bar'}) + self.image_service.create(self.context, {'id': '34', 'foo': 'bar'}) + self.image_service.delete_all() + index = self.image_service.index(self.context) + self.assertEquals(len(index), 0) + + +class FakeImageTestCase(_ImageTestCase): + def 
setUp(self): + super(FakeImageTestCase, self).setUp() + self.image_service = nova.image.fake.FakeImageService() diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 11dc2ec98..d123df6f1 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -16,15 +16,12 @@ """Test suite for IPv6.""" -from nova import flags from nova import ipv6 from nova import log as logging from nova import test LOG = logging.getLogger('nova.tests.test_ipv6') -FLAGS = flags.FLAGS - import sys diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index de7f22925..5fc0d7605 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -32,14 +32,12 @@ from nova import flags from nova import test from nova import utils from nova.api.ec2 import cloud -from nova.auth import manager from nova.compute import power_state from nova.virt.libvirt import connection from nova.virt.libvirt import firewall libvirt = None FLAGS = flags.FLAGS -flags.DECLARE('instances_path', 'nova.compute.manager') def _concurrency(wait, done, target): @@ -94,6 +92,7 @@ def _setup_networking(instance_id, ip='1.2.3.4', mac='56:12:12:12:12:12'): class CacheConcurrencyTestCase(test.TestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() + self.flags(instances_path='nova.compute.manager') def fake_exists(fname): basedir = os.path.join(FLAGS.instances_path, '_base') @@ -154,36 +153,15 @@ class LibvirtConnTestCase(test.TestCase): super(LibvirtConnTestCase, self).setUp() connection._late_load_cheetah() self.flags(fake_call=True) - self.manager = manager.AuthManager() - - try: - pjs = self.manager.get_projects() - pjs = [p for p in pjs if p.name == 'fake'] - if 0 != len(pjs): - self.manager.delete_project(pjs[0]) - - users = self.manager.get_users() - users = [u for u in users if u.name == 'fake'] - if 0 != len(users): - self.manager.delete_user(users[0]) - except Exception, e: - pass - - users = self.manager.get_users() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) self.network = utils.import_object(FLAGS.network_manager) self.context = context.get_admin_context() - FLAGS.instances_path = '' + self.flags(instances_path='') self.call_libvirt_dependant_setup = False self.test_ip = '10.11.12.13' - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(LibvirtConnTestCase, self).tearDown() - test_instance = {'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', @@ -191,6 +169,7 @@ class LibvirtConnTestCase(test.TestCase): 'project_id': 'fake', 'bridge': 'br101', 'image_ref': '123456', + 'local_gb': 20, 'instance_type_id': '5'} # m1.small def lazy_load_library_exists(self): @@ -239,7 +218,7 @@ class LibvirtConnTestCase(test.TestCase): 'mac_address': 'fake', 'ip_address': 'fake', 'dhcp_server': 'fake', - 'extra_params': 'fake' + 'extra_params': 'fake', } # Creating mocks @@ -344,7 +323,7 @@ class LibvirtConnTestCase(test.TestCase): if not self.lazy_load_library_exists(): return - FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.flags(image_service='nova.image.fake.FakeImageService') # Start test image_service = utils.import_object(FLAGS.image_service) @@ -368,7 +347,7 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = connection.LibvirtConnection(False) - 
conn.snapshot(instance_ref, recv_meta['id']) + conn.snapshot(self.context, instance_ref, recv_meta['id']) snapshot = image_service.show(context, recv_meta['id']) self.assertEquals(snapshot['properties']['image_state'], 'available') @@ -379,7 +358,7 @@ class LibvirtConnTestCase(test.TestCase): if not self.lazy_load_library_exists(): return - FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.flags(image_service='nova.image.fake.FakeImageService') # Start test image_service = utils.import_object(FLAGS.image_service) @@ -408,7 +387,7 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = connection.LibvirtConnection(False) - conn.snapshot(instance_ref, recv_meta['id']) + conn.snapshot(self.context, instance_ref, recv_meta['id']) snapshot = image_service.show(context, recv_meta['id']) self.assertEquals(snapshot['properties']['image_state'], 'available') @@ -441,8 +420,8 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(parameters[1].get('value'), 'fake') def _check_xml_and_container(self, instance): - user_context = context.RequestContext(project=self.project, - user=self.user) + user_context = context.RequestContext(self.user_id, + self.project_id) instance_ref = db.instance_create(user_context, instance) _setup_networking(instance_ref['id'], self.test_ip) @@ -470,11 +449,10 @@ class LibvirtConnTestCase(test.TestCase): def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel, rescue=False): - user_context = context.RequestContext(project=self.project, - user=self.user) + user_context = context.RequestContext(self.user_id, self.project_id) instance_ref = db.instance_create(user_context, instance) network_ref = db.project_get_networks(context.get_admin_context(), - self.project.id)[0] + self.project_id)[0] _setup_networking(instance_ref['id'], self.test_ip) @@ -544,7 +522,7 @@ class LibvirtConnTestCase(test.TestCase): 'disk.local')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type + self.flags(libvirt_type=libvirt_type) conn = connection.LibvirtConnection(True) uri = conn.get_uri() @@ -569,9 +547,9 @@ class LibvirtConnTestCase(test.TestCase): # checking against that later on. This way we make sure the # implementation doesn't fiddle around with the FLAGS. testuri = 'something completely different' - FLAGS.libvirt_uri = testuri + self.flags(libvirt_uri=testuri) for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): - FLAGS.libvirt_type = libvirt_type + self.flags(libvirt_type=libvirt_type) conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, testuri) @@ -579,8 +557,7 @@ class LibvirtConnTestCase(test.TestCase): def test_update_available_resource_works_correctly(self): """Confirm compute_node table is updated successfully.""" - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' + self.flags(instances_path='.') # Prepare mocks def getVersion(): @@ -627,12 +604,10 @@ class LibvirtConnTestCase(test.TestCase): self.assertTrue(compute_node['hypervisor_version'] > 0) db.service_destroy(self.context, service_ref['id']) - FLAGS.instances_path = org_path def test_update_resource_info_no_compute_record_found(self): """Raise exception if no recorde found on services table.""" - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' 
+ self.flags(instances_path='.') self.create_fake_libvirt_mock() self.mox.ReplayAll() @@ -641,8 +616,6 @@ class LibvirtConnTestCase(test.TestCase): conn.update_available_resource, self.context, 'dummy') - FLAGS.instances_path = org_path - def test_ensure_filtering_rules_for_instance_timeout(self): """ensure_filtering_fules_for_instance() finishes with timeout.""" # Skip if non-libvirt environment @@ -759,7 +732,7 @@ class LibvirtConnTestCase(test.TestCase): network_info = _create_network_info() try: - conn.spawn(instance, network_info) + conn.spawn(self.context, instance, network_info) except Exception, e: count = (0 <= str(e.message).find('Unexpected method call')) @@ -772,6 +745,42 @@ class LibvirtConnTestCase(test.TestCase): ip = conn.get_host_ip_addr() self.assertEquals(ip, FLAGS.my_ip) + def test_volume_in_mapping(self): + conn = connection.LibvirtConnection(False) + swap = {'device_name': '/dev/sdb', + 'swap_size': 1} + ephemerals = [{'num': 0, + 'virtual_name': 'ephemeral0', + 'device_name': '/dev/sdc1', + 'size': 1}, + {'num': 2, + 'virtual_name': 'ephemeral2', + 'device_name': '/dev/sdd', + 'size': 1}] + block_device_mapping = [{'mount_device': '/dev/sde', + 'device_path': 'fake_device'}, + {'mount_device': '/dev/sdf', + 'device_path': 'fake_device'}] + block_device_info = { + 'root_device_name': '/dev/sda', + 'swap': swap, + 'ephemerals': ephemerals, + 'block_device_mapping': block_device_mapping} + + def _assert_volume_in_mapping(device_name, true_or_false): + self.assertEquals(conn._volume_in_mapping(device_name, + block_device_info), + true_or_false) + + _assert_volume_in_mapping('sda', False) + _assert_volume_in_mapping('sdb', True) + _assert_volume_in_mapping('sdc1', True) + _assert_volume_in_mapping('sdd', True) + _assert_volume_in_mapping('sde', True) + _assert_volume_in_mapping('sdf', True) + _assert_volume_in_mapping('sdg', False) + _assert_volume_in_mapping('sdh1', False) + class NWFilterFakes: def __init__(self): @@ -802,11 +811,9 @@ class IptablesFirewallTestCase(test.TestCase): def setUp(self): super(IptablesFirewallTestCase, self).setUp() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext('fake', 'fake') + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) self.network = utils.import_object(FLAGS.network_manager) class FakeLibvirtConnection(object): @@ -832,11 +839,6 @@ class IptablesFirewallTestCase(test.TestCase): connection.libxml2 = __import__('libxml2') return True - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(IptablesFirewallTestCase, self).tearDown() - in_nat_rules = [ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011', '*nat', @@ -1144,11 +1146,9 @@ class NWFilterTestCase(test.TestCase): class Mock(object): pass - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext(self.user, self.project) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) self.fake_libvirt_connection = Mock() @@ -1156,11 +1156,6 @@ class NWFilterTestCase(test.TestCase): self.fw = firewall.NWFilterFirewall( lambda: 
self.fake_libvirt_connection) - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(NWFilterTestCase, self).tearDown() - def test_cidr_rule_nwfilter_xml(self): cloud_controller = cloud.CloudController() cloud_controller.create_security_group(self.context, diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index c862726ab..f81e7a00a 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -43,6 +43,7 @@ class MetadataTestCase(test.TestCase): 'reservation_id': 'r-xxxxxxxx', 'user_data': '', 'image_ref': 7, + 'root_device_name': '/dev/sda1', 'hostname': 'test'}) def instance_get(*args, **kwargs): diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 4119953f2..24292414f 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -17,7 +17,6 @@ from nova import db from nova import exception -from nova import flags from nova import log as logging from nova import test from nova.network import manager as network_manager @@ -26,7 +25,6 @@ from nova.network import manager as network_manager import mox -FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.network') diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py new file mode 100644 index 000000000..9c6563f14 --- /dev/null +++ b/nova/tests/test_nova_manage.py @@ -0,0 +1,82 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# Copyright 2011 Ilya Alekseyev +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
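# ----------------------------------------------------------------------
# [Editorial aside, not part of the patch] The new test file below needs
# to exercise code that lives in bin/nova-manage, which has no .py suffix
# and therefore cannot be imported normally, so the patch loads it by path:
#
#     sys.dont_write_bytecode = True
#     nova_manage = imp.load_source('nova_manage', NOVA_MANAGE_PATH)
#     sys.dont_write_bytecode = False
#     ...
#     self.commands = nova_manage.FixedIpCommands()
#
# (The patch passes 'nova_manage.py' as the module name; imp.load_source
# only uses that string as the name registered in sys.modules, so either
# spelling works.)
# ----------------------------------------------------------------------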
+ +import os +import sys + +TOPDIR = os.path.normpath(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + os.pardir, + os.pardir)) +NOVA_MANAGE_PATH = os.path.join(TOPDIR, 'bin', 'nova-manage') + +sys.dont_write_bytecode = True +import imp +nova_manage = imp.load_source('nova_manage.py', NOVA_MANAGE_PATH) +sys.dont_write_bytecode = False + +import netaddr +from nova import context +from nova import db +from nova import flags +from nova import test + +FLAGS = flags.FLAGS + + +class FixedIpCommandsTestCase(test.TestCase): + def setUp(self): + super(FixedIpCommandsTestCase, self).setUp() + cidr = '10.0.0.0/24' + net = netaddr.IPNetwork(cidr) + net_info = {'bridge': 'fakebr', + 'bridge_interface': 'fakeeth', + 'dns': FLAGS.flat_network_dns, + 'cidr': cidr, + 'netmask': str(net.netmask), + 'gateway': str(net[1]), + 'broadcast': str(net.broadcast), + 'dhcp_start': str(net[2])} + self.network = db.network_create_safe(context.get_admin_context(), + net_info) + num_ips = len(net) + for index in range(num_ips): + address = str(net[index]) + reserved = (index == 1 or index == 2) + db.fixed_ip_create(context.get_admin_context(), + {'network_id': self.network['id'], + 'address': address, + 'reserved': reserved}) + self.commands = nova_manage.FixedIpCommands() + + def tearDown(self): + db.network_delete_safe(context.get_admin_context(), self.network['id']) + super(FixedIpCommandsTestCase, self).tearDown() + + def test_reserve(self): + self.commands.reserve('10.0.0.100') + address = db.fixed_ip_get_by_address(context.get_admin_context(), + '10.0.0.100') + self.assertEqual(address['reserved'], True) + + def test_unreserve(self): + db.fixed_ip_update(context.get_admin_context(), '10.0.0.100', + {'reserved': True}) + self.commands.unreserve('10.0.0.100') + address = db.fixed_ip_get_by_address(context.get_admin_context(), + '10.0.0.100') + self.assertEqual(address['reserved'], False) diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py index 39b4e18d7..0b2dce20e 100644 --- a/nova/tests/test_objectstore.py +++ b/nova/tests/test_objectstore.py @@ -21,8 +21,6 @@ Unittets for S3 objectstore clone. 
""" import boto -import glob -import hashlib import os import shutil import tempfile @@ -30,12 +28,9 @@ import tempfile from boto import exception as boto_exception from boto.s3 import connection as s3 -from nova import context -from nova import exception from nova import flags from nova import wsgi from nova import test -from nova.auth import manager from nova.objectstore import s3server @@ -57,15 +52,9 @@ class S3APITestCase(test.TestCase): def setUp(self): """Setup users, projects, and start a test server.""" super(S3APITestCase, self).setUp() - self.flags(auth_driver='nova.auth.ldapdriver.FakeLdapDriver', - buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), + self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), s3_host='127.0.0.1') - self.auth_manager = manager.AuthManager() - self.admin_user = self.auth_manager.create_user('admin', admin=True) - self.admin_project = self.auth_manager.create_project('admin', - self.admin_user) - shutil.rmtree(FLAGS.buckets_path) os.mkdir(FLAGS.buckets_path) @@ -80,8 +69,8 @@ class S3APITestCase(test.TestCase): boto.config.add_section('Boto') boto.config.set('Boto', 'num_retries', '0') - conn = s3.S3Connection(aws_access_key_id=self.admin_user.access, - aws_secret_access_key=self.admin_user.secret, + conn = s3.S3Connection(aws_access_key_id='fake', + aws_secret_access_key='fake', host=FLAGS.s3_host, port=FLAGS.s3_port, is_secure=False, @@ -104,11 +93,11 @@ class S3APITestCase(test.TestCase): self.assertEquals(buckets[0].name, name, "Wrong name") return True - def test_000_list_buckets(self): + def test_list_buckets(self): """Make sure we are starting with no buckets.""" self._ensure_no_buckets(self.conn.get_all_buckets()) - def test_001_create_and_delete_bucket(self): + def test_create_and_delete_bucket(self): """Test bucket creation and deletion.""" bucket_name = 'testbucket' @@ -117,7 +106,7 @@ class S3APITestCase(test.TestCase): self.conn.delete_bucket(bucket_name) self._ensure_no_buckets(self.conn.get_all_buckets()) - def test_002_create_bucket_and_key_and_delete_key_again(self): + def test_create_bucket_and_key_and_delete_key_again(self): """Test key operations on buckets.""" bucket_name = 'testbucket' key_name = 'somekey' @@ -146,8 +135,6 @@ class S3APITestCase(test.TestCase): bucket_name) def tearDown(self): - """Tear down auth and test server.""" - self.auth_manager.delete_user('admin') - self.auth_manager.delete_project('admin') + """Tear down test server.""" self.server.stop() super(S3APITestCase, self).tearDown() diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index a35caadf8..f4b481ebe 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -20,12 +20,9 @@ from nova import compute from nova import context from nova import db from nova import flags -from nova import network from nova import quota from nova import test -from nova import utils from nova import volume -from nova.auth import manager from nova.compute import instance_types @@ -48,25 +45,20 @@ class QuotaTestCase(test.TestCase): quota_gigabytes=20, quota_floating_ips=1) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('admin', 'admin', 'admin', True) - self.project = self.manager.create_project('admin', 'admin', 'admin') self.network = self.network = self.start_service('network') - self.context = context.RequestContext(project=self.project, - user=self.user) - - def tearDown(self): - manager.AuthManager().delete_project(self.project) - manager.AuthManager().delete_user(self.user) - super(QuotaTestCase, 
self).tearDown() + self.user_id = 'admin' + self.project_id = 'admin' + self.context = context.RequestContext(self.user_id, + self.project_id, + True) def _create_instance(self, cores=2): """Create a test instance""" inst = {} inst['image_id'] = 1 inst['reservation_id'] = 'r-fakeres' - inst['user_id'] = self.user.id - inst['project_id'] = self.project.id + inst['user_id'] = self.user_id + inst['project_id'] = self.project_id inst['instance_type_id'] = '3' # m1.large inst['vcpus'] = cores return db.instance_create(self.context, inst)['id'] @@ -74,8 +66,8 @@ class QuotaTestCase(test.TestCase): def _create_volume(self, size=10): """Create a test volume""" vol = {} - vol['user_id'] = self.user.id - vol['project_id'] = self.project.id + vol['user_id'] = self.user_id + vol['project_id'] = self.project_id vol['size'] = size return db.volume_create(self.context, vol)['id'] @@ -95,15 +87,15 @@ class QuotaTestCase(test.TestCase): num_instances = quota.allowed_instances(self.context, 100, self._get_instance_type('m1.small')) self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project.id, 'instances', 10) + db.quota_create(self.context, self.project_id, 'instances', 10) num_instances = quota.allowed_instances(self.context, 100, self._get_instance_type('m1.small')) self.assertEqual(num_instances, 4) - db.quota_create(self.context, self.project.id, 'cores', 100) + db.quota_create(self.context, self.project_id, 'cores', 100) num_instances = quota.allowed_instances(self.context, 100, self._get_instance_type('m1.small')) self.assertEqual(num_instances, 10) - db.quota_create(self.context, self.project.id, 'ram', 3 * 2048) + db.quota_create(self.context, self.project_id, 'ram', 3 * 2048) num_instances = quota.allowed_instances(self.context, 100, self._get_instance_type('m1.small')) self.assertEqual(num_instances, 3) @@ -113,23 +105,21 @@ class QuotaTestCase(test.TestCase): num_metadata_items = quota.allowed_metadata_items(self.context, too_many_items) self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items) - db.quota_create(self.context, self.project.id, 'metadata_items', 5) + db.quota_create(self.context, self.project_id, 'metadata_items', 5) num_metadata_items = quota.allowed_metadata_items(self.context, too_many_items) self.assertEqual(num_metadata_items, 5) # Cleanup - db.quota_destroy_all_by_project(self.context, self.project.id) + db.quota_destroy_all_by_project(self.context, self.project_id) def test_unlimited_instances(self): - FLAGS.quota_instances = 2 - FLAGS.quota_ram = -1 - FLAGS.quota_cores = -1 + self.flags(quota_instances=2, quota_ram=-1, quota_cores=-1) instance_type = self._get_instance_type('m1.small') num_instances = quota.allowed_instances(self.context, 100, instance_type) self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project.id, 'instances', None) + db.quota_create(self.context, self.project_id, 'instances', None) num_instances = quota.allowed_instances(self.context, 100, instance_type) self.assertEqual(num_instances, 100) @@ -138,14 +128,12 @@ class QuotaTestCase(test.TestCase): self.assertEqual(num_instances, 101) def test_unlimited_ram(self): - FLAGS.quota_instances = -1 - FLAGS.quota_ram = 2 * 2048 - FLAGS.quota_cores = -1 + self.flags(quota_instances=-1, quota_ram=2 * 2048, quota_cores=-1) instance_type = self._get_instance_type('m1.small') num_instances = quota.allowed_instances(self.context, 100, instance_type) self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project.id, 'ram', None) + 
db.quota_create(self.context, self.project_id, 'ram', None) num_instances = quota.allowed_instances(self.context, 100, instance_type) self.assertEqual(num_instances, 100) @@ -154,14 +142,12 @@ class QuotaTestCase(test.TestCase): self.assertEqual(num_instances, 101) def test_unlimited_cores(self): - FLAGS.quota_instances = -1 - FLAGS.quota_ram = -1 - FLAGS.quota_cores = 2 + self.flags(quota_instances=-1, quota_ram=-1, quota_cores=2) instance_type = self._get_instance_type('m1.small') num_instances = quota.allowed_instances(self.context, 100, instance_type) self.assertEqual(num_instances, 2) - db.quota_create(self.context, self.project.id, 'cores', None) + db.quota_create(self.context, self.project_id, 'cores', None) num_instances = quota.allowed_instances(self.context, 100, instance_type) self.assertEqual(num_instances, 100) @@ -170,42 +156,40 @@ class QuotaTestCase(test.TestCase): self.assertEqual(num_instances, 101) def test_unlimited_volumes(self): - FLAGS.quota_volumes = 10 - FLAGS.quota_gigabytes = -1 + self.flags(quota_volumes=10, quota_gigabytes=-1) volumes = quota.allowed_volumes(self.context, 100, 1) self.assertEqual(volumes, 10) - db.quota_create(self.context, self.project.id, 'volumes', None) + db.quota_create(self.context, self.project_id, 'volumes', None) volumes = quota.allowed_volumes(self.context, 100, 1) self.assertEqual(volumes, 100) volumes = quota.allowed_volumes(self.context, 101, 1) self.assertEqual(volumes, 101) def test_unlimited_gigabytes(self): - FLAGS.quota_volumes = -1 - FLAGS.quota_gigabytes = 10 + self.flags(quota_volumes=-1, quota_gigabytes=10) volumes = quota.allowed_volumes(self.context, 100, 1) self.assertEqual(volumes, 10) - db.quota_create(self.context, self.project.id, 'gigabytes', None) + db.quota_create(self.context, self.project_id, 'gigabytes', None) volumes = quota.allowed_volumes(self.context, 100, 1) self.assertEqual(volumes, 100) volumes = quota.allowed_volumes(self.context, 101, 1) self.assertEqual(volumes, 101) def test_unlimited_floating_ips(self): - FLAGS.quota_floating_ips = 10 + self.flags(quota_floating_ips=10) floating_ips = quota.allowed_floating_ips(self.context, 100) self.assertEqual(floating_ips, 10) - db.quota_create(self.context, self.project.id, 'floating_ips', None) + db.quota_create(self.context, self.project_id, 'floating_ips', None) floating_ips = quota.allowed_floating_ips(self.context, 100) self.assertEqual(floating_ips, 100) floating_ips = quota.allowed_floating_ips(self.context, 101) self.assertEqual(floating_ips, 101) def test_unlimited_metadata_items(self): - FLAGS.quota_metadata_items = 10 + self.flags(quota_metadata_items=10) items = quota.allowed_metadata_items(self.context, 100) self.assertEqual(items, 10) - db.quota_create(self.context, self.project.id, 'metadata_items', None) + db.quota_create(self.context, self.project_id, 'metadata_items', None) items = quota.allowed_metadata_items(self.context, 100) self.assertEqual(items, 100) items = quota.allowed_metadata_items(self.context, 101) @@ -273,11 +257,11 @@ class QuotaTestCase(test.TestCase): address = '192.168.0.100' db.floating_ip_create(context.get_admin_context(), {'address': address, - 'project_id': self.project.id}) + 'project_id': self.project_id}) self.assertRaises(quota.QuotaError, self.network.allocate_floating_ip, self.context, - self.project.id) + self.project_id) db.floating_ip_destroy(context.get_admin_context(), address) def test_too_many_metadata_items(self): @@ -294,49 +278,49 @@ class QuotaTestCase(test.TestCase): metadata=metadata) def 
test_default_allowed_injected_files(self): - FLAGS.quota_max_injected_files = 55 + self.flags(quota_max_injected_files=55) self.assertEqual(quota.allowed_injected_files(self.context, 100), 55) def test_overridden_allowed_injected_files(self): - FLAGS.quota_max_injected_files = 5 - db.quota_create(self.context, self.project.id, 'injected_files', 77) + self.flags(quota_max_injected_files=5) + db.quota_create(self.context, self.project_id, 'injected_files', 77) self.assertEqual(quota.allowed_injected_files(self.context, 100), 77) def test_unlimited_default_allowed_injected_files(self): - FLAGS.quota_max_injected_files = -1 + self.flags(quota_max_injected_files=-1) self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) def test_unlimited_db_allowed_injected_files(self): - FLAGS.quota_max_injected_files = 5 - db.quota_create(self.context, self.project.id, 'injected_files', None) + self.flags(quota_max_injected_files=5) + db.quota_create(self.context, self.project_id, 'injected_files', None) self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) def test_default_allowed_injected_file_content_bytes(self): - FLAGS.quota_max_injected_file_content_bytes = 12345 + self.flags(quota_max_injected_file_content_bytes=12345) limit = quota.allowed_injected_file_content_bytes(self.context, 23456) self.assertEqual(limit, 12345) def test_overridden_allowed_injected_file_content_bytes(self): - FLAGS.quota_max_injected_file_content_bytes = 12345 - db.quota_create(self.context, self.project.id, + self.flags(quota_max_injected_file_content_bytes=12345) + db.quota_create(self.context, self.project_id, 'injected_file_content_bytes', 5678) limit = quota.allowed_injected_file_content_bytes(self.context, 23456) self.assertEqual(limit, 5678) def test_unlimited_default_allowed_injected_file_content_bytes(self): - FLAGS.quota_max_injected_file_content_bytes = -1 + self.flags(quota_max_injected_file_content_bytes=-1) limit = quota.allowed_injected_file_content_bytes(self.context, 23456) self.assertEqual(limit, 23456) def test_unlimited_db_allowed_injected_file_content_bytes(self): - FLAGS.quota_max_injected_file_content_bytes = 12345 - db.quota_create(self.context, self.project.id, + self.flags(quota_max_injected_file_content_bytes=12345) + db.quota_create(self.context, self.project_id, 'injected_file_content_bytes', None) limit = quota.allowed_injected_file_content_bytes(self.context, 23456) self.assertEqual(limit, 23456) def _create_with_injected_files(self, files): - FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.flags(image_service='nova.image.fake.FakeImageService') api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, @@ -344,7 +328,7 @@ class QuotaTestCase(test.TestCase): injected_files=files) def test_no_injected_files(self): - FLAGS.image_service = 'nova.image.fake.FakeImageService' + self.flags(image_service='nova.image.fake.FakeImageService') api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, instance_type=inst_type, image_href='3') diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ffd748efe..ba9c0a859 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -20,24 +20,23 @@ Unit Tests for remote procedure calls using queue """ from nova import context -from nova import flags from nova import log as logging from nova import 
rpc from nova import test -FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): def setUp(self): super(RpcTestCase, self).setUp() - self.conn = rpc.Connection.instance(True) + self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - self.consumer = rpc.TopicAdapterConsumer(connection=self.conn, - topic='test', - proxy=self.receiver) + self.consumer = rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) self.consumer.attach_to_eventlet() self.context = context.get_admin_context() @@ -129,6 +128,8 @@ class RpcTestCase(test.TestCase): """Calls echo in the passed queue""" LOG.debug(_("Nested received %(queue)s, %(value)s") % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. ret = rpc.call(context, queue, {"method": "echo", @@ -137,10 +138,11 @@ class RpcTestCase(test.TestCase): return value nested = Nested() - conn = rpc.Connection.instance(True) - consumer = rpc.TopicAdapterConsumer(connection=conn, - topic='nested', - proxy=nested) + conn = rpc.create_connection(True) + consumer = rpc.create_consumer(conn, + 'nested', + nested, + False) consumer.attach_to_eventlet() value = 42 result = rpc.call(self.context, @@ -149,47 +151,6 @@ class RpcTestCase(test.TestCase): "value": value}}) self.assertEqual(value, result) - def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection.""" - conn1 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn1) - conn2 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn2) - self.assertEqual(conn1, conn2) - - def test_connectionpool_double(self): - """Test that ConnectionPool returns and reuses separate connections. - - When called consecutively we should get separate connections and upon - returning them those connections should be reused for future calls - before generating a new connection. - - """ - conn1 = rpc.ConnectionPool.get() - conn2 = rpc.ConnectionPool.get() - - self.assertNotEqual(conn1, conn2) - rpc.ConnectionPool.put(conn1) - rpc.ConnectionPool.put(conn2) - - conn3 = rpc.ConnectionPool.get() - conn4 = rpc.ConnectionPool.get() - self.assertEqual(conn1, conn3) - self.assertEqual(conn2, conn4) - - def test_connectionpool_limit(self): - """Test connection pool limit and connection uniqueness.""" - max_size = FLAGS.rpc_conn_pool_size - conns = [] - - for i in xrange(max_size): - conns.append(rpc.ConnectionPool.get()) - - self.assertFalse(rpc.ConnectionPool.free_items) - self.assertEqual(rpc.ConnectionPool.current_size, - rpc.ConnectionPool.max_size) - self.assertEqual(len(set(conns)), max_size) - class TestReceiver(object): """Simple Proxy class so the consumer has methods to call. diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py new file mode 100644 index 000000000..2215a908b --- /dev/null +++ b/nova/tests/test_rpc_amqp.py @@ -0,0 +1,88 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests For RPC AMQP. +""" + +from nova import context +from nova import log as logging +from nova import rpc +from nova.rpc import amqp +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcAMQPTestCase(test.TestCase): + def setUp(self): + super(RpcAMQPTestCase, self).setUp() + self.conn = rpc.create_connection(True) + self.receiver = TestReceiver() + self.consumer = rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.consumer.attach_to_eventlet() + self.context = context.get_admin_context() + + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection.""" + conn1 = amqp.ConnectionPool.get() + amqp.ConnectionPool.put(conn1) + conn2 = amqp.ConnectionPool.get() + amqp.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. + + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index f45f76b73..8f92406ff 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -33,7 +33,6 @@ from nova import manager from nova import wsgi from nova.compute import manager as compute_manager -FLAGS = flags.FLAGS flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager", "Manager for testing") @@ -109,103 +108,8 @@ class ServiceTestCase(test.TestCase): # the looping calls are created in StartService. 
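For reference, the connection/consumer setup that replaces the old rpc.Connection.instance() and rpc.TopicAdapterConsumer calls throughout these tests looks roughly like the sketch below; the EchoProxy class and the 'test' topic are illustrative only::

    from nova import context
    from nova import rpc


    class EchoProxy(object):
        """Trivial proxy with a method the consumer can dispatch to."""

        @staticmethod
        def echo(context, value):
            return value


    conn = rpc.create_connection(new=True)
    # Positional args mirror the tests above: connection, topic, proxy, fanout.
    consumer = rpc.create_consumer(conn, 'test', EchoProxy(), False)
    consumer.attach_to_eventlet()

    result = rpc.call(context.get_admin_context(), 'test',
                      {"method": "echo", "args": {"value": 42}})  # returns 42

The ConnectionPool tests removed here now exercise nova.rpc.amqp.ConnectionPool directly in the new test_rpc_amqp.py below.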
app = service.Service.create(host=host, binary=binary, topic=topic) - self.mox.StubOutWithMock(service.rpc.Connection, 'instance') - service.rpc.Connection.instance(new=mox.IgnoreArg()) - - self.mox.StubOutWithMock(rpc, - 'TopicAdapterConsumer', - use_mock_anything=True) - self.mox.StubOutWithMock(rpc, - 'FanoutAdapterConsumer', - use_mock_anything=True) - - self.mox.StubOutWithMock(rpc, - 'ConsumerSet', - use_mock_anything=True) - - rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), - topic=topic, - proxy=mox.IsA(service.Service)).AndReturn( - rpc.TopicAdapterConsumer) - - rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), - topic='%s.%s' % (topic, host), - proxy=mox.IsA(service.Service)).AndReturn( - rpc.TopicAdapterConsumer) - - rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(), - topic=topic, - proxy=mox.IsA(service.Service)).AndReturn( - rpc.FanoutAdapterConsumer) - - def wait_func(self, limit=None): - return None - - mock_cset = self.mox.CreateMock(rpc.ConsumerSet, - {'wait': wait_func}) - rpc.ConsumerSet(connection=mox.IgnoreArg(), - consumer_list=mox.IsA(list)).AndReturn(mock_cset) - wait_func(mox.IgnoreArg()) - - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'availability_zone': 'nova'} - service_ref = {'host': host, - 'binary': binary, - 'report_count': 0, - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - self.mox.ReplayAll() - - app.start() - app.stop() self.assert_(app) - # We're testing sort of weird behavior in how report_state decides - # whether it is disconnected, it looks for a variable on itself called - # 'model_disconnected' and report_state doesn't really do much so this - # these are mostly just for coverage - def test_report_state_no_service(self): - host = 'foo' - binary = 'bar' - topic = 'test' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'availability_zone': 'nova'} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - service.db.service_get(mox.IgnoreArg(), - service_ref['id']).AndReturn(service_ref) - service.db.service_update(mox.IgnoreArg(), service_ref['id'], - mox.ContainsKeyValue('report_count', 1)) - - self.mox.ReplayAll() - serv = service.Service(host, - binary, - topic, - 'nova.tests.test_service.FakeManager') - serv.start() - serv.report_state() - def test_report_state_newly_disconnected(self): host = 'foo' binary = 'bar' @@ -276,81 +180,6 @@ class ServiceTestCase(test.TestCase): self.assert_(not serv.model_disconnected) - def test_compute_can_update_available_resource(self): - """Confirm compute updates their record of compute-service table.""" - host = 'foo' - binary = 'nova-compute' - topic = 'compute' - - # Any mocks are not working without UnsetStubs() here. - self.mox.UnsetStubs() - ctxt = context.get_admin_context() - service_ref = db.service_create(ctxt, {'host': host, - 'binary': binary, - 'topic': topic}) - serv = service.Service(host, - binary, - topic, - 'nova.compute.manager.ComputeManager') - - # This testcase want to test calling update_available_resource. - # No need to call periodic call, then below variable must be set 0. 
- serv.report_interval = 0 - serv.periodic_interval = 0 - - # Creating mocks - self.mox.StubOutWithMock(service.rpc.Connection, 'instance') - service.rpc.Connection.instance(new=mox.IgnoreArg()) - - self.mox.StubOutWithMock(rpc, - 'TopicAdapterConsumer', - use_mock_anything=True) - self.mox.StubOutWithMock(rpc, - 'FanoutAdapterConsumer', - use_mock_anything=True) - - self.mox.StubOutWithMock(rpc, - 'ConsumerSet', - use_mock_anything=True) - - rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), - topic=topic, - proxy=mox.IsA(service.Service)).AndReturn( - rpc.TopicAdapterConsumer) - - rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), - topic='%s.%s' % (topic, host), - proxy=mox.IsA(service.Service)).AndReturn( - rpc.TopicAdapterConsumer) - - rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(), - topic=topic, - proxy=mox.IsA(service.Service)).AndReturn( - rpc.FanoutAdapterConsumer) - - def wait_func(self, limit=None): - return None - - mock_cset = self.mox.CreateMock(rpc.ConsumerSet, - {'wait': wait_func}) - rpc.ConsumerSet(connection=mox.IgnoreArg(), - consumer_list=mox.IsA(list)).AndReturn(mock_cset) - wait_func(mox.IgnoreArg()) - - self.mox.StubOutWithMock(serv.manager.driver, - 'update_available_resource') - serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) - - # Just doing start()-stop(), not confirm new db record is created, - # because update_available_resource() works only in - # libvirt environment. This testcase confirms - # update_available_resource() is called. Otherwise, mox complains. - self.mox.ReplayAll() - serv.start() - serv.stop() - - db.service_destroy(ctxt, service_ref['id']) - class TestWSGIService(test.TestCase): diff --git a/nova/tests/test_skip_examples.py b/nova/tests/test_skip_examples.py new file mode 100644 index 000000000..8ca203442 --- /dev/null +++ b/nova/tests/test_skip_examples.py @@ -0,0 +1,47 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import test + + +class ExampleSkipTestCase(test.TestCase): + test_counter = 0 + + @test.skip_test("Example usage of @test.skip_test()") + def test_skip_test_example(self): + self.fail("skip_test failed to work properly.") + + @test.skip_if(True, "Example usage of @test.skip_if()") + def test_skip_if_example(self): + self.fail("skip_if failed to work properly.") + + @test.skip_unless(False, "Example usage of @test.skip_unless()") + def test_skip_unless_example(self): + self.fail("skip_unless failed to work properly.") + + @test.skip_if(False, "This test case should never be skipped.") + def test_001_increase_test_counter(self): + ExampleSkipTestCase.test_counter += 1 + + @test.skip_unless(True, "This test case should never be skipped.") + def test_002_increase_test_counter(self): + ExampleSkipTestCase.test_counter += 1 + + def test_003_verify_test_counter(self): + self.assertEquals(ExampleSkipTestCase.test_counter, 2, + "Tests were not skipped appropriately") diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index 35c838065..64f11fa45 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -33,8 +33,13 @@ class IsolationTestCase(test.TestCase): self.start_service('compute') def test_rpc_consumer_isolation(self): - connection = rpc.Connection.instance(new=True) - consumer = rpc.TopicAdapterConsumer(connection, topic='compute') - consumer.register_callback( - lambda x, y: self.fail('I should never be called')) + class NeverCalled(object): + + def __getattribute__(*args): + assert False, "I should never get called." + + connection = rpc.create_connection(new=True) + proxy = NeverCalled() + consumer = rpc.create_consumer(connection, 'compute', + proxy, fanout=False) consumer.attach_to_eventlet() diff --git a/nova/tests/test_twistd.py b/nova/tests/test_twistd.py deleted file mode 100644 index ff8627c3b..000000000 --- a/nova/tests/test_twistd.py +++ /dev/null @@ -1,53 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
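The skip helpers demonstrated in test_skip_examples.py above accept any boolean expression, so they can also gate tests on the runtime environment. A hypothetical sketch, assuming only the decorators shown above (the os.getuid() and path checks are made-up conditions)::

    import os

    from nova import test


    class ConditionalSkipTestCase(test.TestCase):

        @test.skip_if(os.getuid() != 0, "Must run as root")
        def test_requires_root(self):
            pass

        @test.skip_unless(os.path.exists('/dev/kvm'), "KVM not available")
        def test_requires_kvm(self):
            pass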
- -import StringIO -import sys - -from nova import twistd -from nova import exception -from nova import flags -from nova import test - - -FLAGS = flags.FLAGS - - -class TwistdTestCase(test.TestCase): - def setUp(self): - super(TwistdTestCase, self).setUp() - self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions) - sys.stdout = StringIO.StringIO() - - def tearDown(self): - super(TwistdTestCase, self).tearDown() - sys.stdout = sys.__stdout__ - - def test_basic(self): - options = self.Options() - argv = options.parseOptions() - - def test_logfile(self): - options = self.Options() - argv = options.parseOptions(['--logfile=foo']) - self.assertEqual(FLAGS.logfile, 'foo') - - def test_help(self): - options = self.Options() - self.assertRaises(SystemExit, options.parseOptions, ['--help']) - self.assert_('pidfile' in sys.stdout.getvalue()) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 0c359e981..ec5098a37 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. +import datetime import os import tempfile @@ -306,3 +307,80 @@ class IsUUIDLikeTestCase(test.TestCase): def test_non_uuid_string_passed(self): val = 'foo-fooo' self.assertUUIDLike(val, False) + + +class ToPrimitiveTestCase(test.TestCase): + def test_list(self): + self.assertEquals(utils.to_primitive([1, 2, 3]), [1, 2, 3]) + + def test_empty_list(self): + self.assertEquals(utils.to_primitive([]), []) + + def test_tuple(self): + self.assertEquals(utils.to_primitive((1, 2, 3)), [1, 2, 3]) + + def test_dict(self): + self.assertEquals(utils.to_primitive(dict(a=1, b=2, c=3)), + dict(a=1, b=2, c=3)) + + def test_empty_dict(self): + self.assertEquals(utils.to_primitive({}), {}) + + def test_datetime(self): + x = datetime.datetime(1, 2, 3, 4, 5, 6, 7) + self.assertEquals(utils.to_primitive(x), "0001-02-03 04:05:06.000007") + + def test_iter(self): + class IterClass(object): + def __init__(self): + self.data = [1, 2, 3, 4, 5] + self.index = 0 + + def __iter__(self): + return self + + def next(self): + if self.index == len(self.data): + raise StopIteration + self.index = self.index + 1 + return self.data[self.index - 1] + + x = IterClass() + self.assertEquals(utils.to_primitive(x), [1, 2, 3, 4, 5]) + + def test_iteritems(self): + class IterItemsClass(object): + def __init__(self): + self.data = dict(a=1, b=2, c=3).items() + self.index = 0 + + def __iter__(self): + return self + + def next(self): + if self.index == len(self.data): + raise StopIteration + self.index = self.index + 1 + return self.data[self.index - 1] + + x = IterItemsClass() + ordered = utils.to_primitive(x) + ordered.sort() + self.assertEquals(ordered, [['a', 1], ['b', 2], ['c', 3]]) + + def test_instance(self): + class MysteryClass(object): + a = 10 + + def __init__(self): + self.b = 1 + + x = MysteryClass() + self.assertEquals(utils.to_primitive(x, convert_instances=True), + dict(b=1)) + + self.assertEquals(utils.to_primitive(x), x) + + def test_typeerror(self): + x = bytearray # Class, not instance + self.assertEquals(utils.to_primitive(x), u"<type 'bytearray'>") diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py new file mode 100644 index 000000000..388f075af --- /dev/null +++ b/nova/tests/test_virt.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Isaku Yamahata +# All Rights Reserved. 
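The conversions pinned down by ToPrimitiveTestCase above can be summarised in a few lines; the Record class is illustrative only::

    import datetime

    from nova import utils


    class Record(object):
        def __init__(self):
            self.name = 'demo'
            self.when = datetime.datetime(2011, 8, 9)


    utils.to_primitive((1, 2, 3))   # tuples become lists: [1, 2, 3]
    utils.to_primitive(Record())    # instances pass through unchanged
    utils.to_primitive(Record(), convert_instances=True)
    # lossy instance conversion: {'name': 'demo', 'when': '2011-08-09 00:00:00'}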
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import flags +from nova import test +from nova.virt import driver + +FLAGS = flags.FLAGS + + +class TestVirtDriver(test.TestCase): + def test_block_device(self): + swap = {'device_name': '/dev/sdb', + 'swap_size': 1} + ephemerals = [{'num': 0, + 'virtual_name': 'ephemeral0', + 'device_name': '/dev/sdc1', + 'size': 1}] + block_device_mapping = [{'mount_device': '/dev/sde', + 'device_path': 'fake_device'}] + block_device_info = { + 'root_device_name': '/dev/sda', + 'swap': swap, + 'ephemerals': ephemerals, + 'block_device_mapping': block_device_mapping} + + empty_block_device_info = {} + + self.assertEqual( + driver.block_device_info_get_root(block_device_info), '/dev/sda') + self.assertEqual( + driver.block_device_info_get_root(empty_block_device_info), None) + self.assertEqual( + driver.block_device_info_get_root(None), None) + + self.assertEqual( + driver.block_device_info_get_swap(block_device_info), swap) + self.assertEqual(driver.block_device_info_get_swap( + empty_block_device_info)['device_name'], None) + self.assertEqual(driver.block_device_info_get_swap( + empty_block_device_info)['swap_size'], 0) + self.assertEqual( + driver.block_device_info_get_swap({'swap': None})['device_name'], + None) + self.assertEqual( + driver.block_device_info_get_swap({'swap': None})['swap_size'], + 0) + self.assertEqual( + driver.block_device_info_get_swap(None)['device_name'], None) + self.assertEqual( + driver.block_device_info_get_swap(None)['swap_size'], 0) + + self.assertEqual( + driver.block_device_info_get_ephemerals(block_device_info), + ephemerals) + self.assertEqual( + driver.block_device_info_get_ephemerals(empty_block_device_info), + []) + self.assertEqual( + driver.block_device_info_get_ephemerals(None), + []) + + def test_swap_is_usable(self): + self.assertFalse(driver.swap_is_usable(None)) + self.assertFalse(driver.swap_is_usable({'device_name': None})) + self.assertFalse(driver.swap_is_usable({'device_name': '/dev/sdb', + 'swap_size': 0})) + self.assertTrue(driver.swap_is_usable({'device_name': '/dev/sdb', + 'swap_size': 1})) diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 7313508a6..06daf46e8 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -19,11 +19,11 @@ Test suite for VMWareAPI. 
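The block_device_info dict consumed by the new nova.virt.driver helpers (tested in test_virt.py above and defined in nova/virt/driver.py later in this diff) has the following shape; a minimal sketch::

    from nova.virt import driver

    block_device_info = {
        'root_device_name': '/dev/sda',
        'swap': {'device_name': '/dev/sdb', 'swap_size': 1},
        'ephemerals': [{'num': 0,
                        'virtual_name': 'ephemeral0',
                        'device_name': '/dev/sdc1',
                        'size': 1}],
        'block_device_mapping': [{'mount_device': '/dev/sde',
                                  'device_path': 'fake_device'}],
    }

    root = driver.block_device_info_get_root(block_device_info)
    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        pass  # a driver would attach the swap device here
    ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
    mapping = driver.block_device_info_get_mapping(block_device_info)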
""" +from nova import context from nova import db from nova import flags from nova import test from nova import utils -from nova.auth import manager from nova.compute import power_state from nova.tests.glance import stubs as glance_stubs from nova.tests.vmwareapi import db_fakes @@ -40,13 +40,13 @@ class VMWareAPIVMTestCase(test.TestCase): def setUp(self): super(VMWareAPIVMTestCase, self).setUp() + self.context = context.RequestContext('fake', 'fake', False) self.flags(vmwareapi_host_ip='test_url', vmwareapi_host_username='test_username', vmwareapi_host_password='test_pass') - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) self.network = utils.import_object(FLAGS.network_manager) vmwareapi_fake.reset() db_fakes.stub_out_db_instance_api(self.stubs) @@ -77,14 +77,12 @@ class VMWareAPIVMTestCase(test.TestCase): def tearDown(self): super(VMWareAPIVMTestCase, self).tearDown() vmwareapi_fake.cleanup() - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) def _create_instance_in_the_db(self): values = {'name': 1, 'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, + 'project_id': self.project_id, + 'user_id': self.user_id, 'image_ref': "1", 'kernel_id': "1", 'ramdisk_id': "1", @@ -97,7 +95,7 @@ class VMWareAPIVMTestCase(test.TestCase): """Create and spawn the VM.""" self._create_instance_in_the_db() self.type_data = db.instance_type_get_by_name(None, 'm1.large') - self.conn.spawn(self.instance, self.network_info) + self.conn.spawn(self.context, self.instance, self.network_info) self._check_vm_record() def _check_vm_record(self): @@ -159,14 +157,14 @@ class VMWareAPIVMTestCase(test.TestCase): self._create_vm() info = self.conn.get_info(1) self._check_vm_info(info, power_state.RUNNING) - self.conn.snapshot(self.instance, "Test-Snapshot") + self.conn.snapshot(self.context, self.instance, "Test-Snapshot") info = self.conn.get_info(1) self._check_vm_info(info, power_state.RUNNING) def test_snapshot_non_existent(self): self._create_instance_in_the_db() - self.assertRaises(Exception, self.conn.snapshot, self.instance, - "Test-Snapshot") + self.assertRaises(Exception, self.conn.snapshot, self.context, + self.instance, "Test-Snapshot") def test_reboot(self): self._create_vm() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 8b3b5fa28..dfc1eeb0a 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -30,7 +30,6 @@ from nova import flags from nova import log as logging from nova import test from nova import utils -from nova.auth import manager from nova.compute import instance_types from nova.compute import power_state from nova import exception @@ -69,15 +68,17 @@ class XenAPIVolumeTestCase(test.TestCase): def setUp(self): super(XenAPIVolumeTestCase, self).setUp() self.stubs = stubout.StubOutForTesting() - self.context = context.RequestContext('fake', 'fake', False) - FLAGS.target_host = '127.0.0.1' - FLAGS.xenapi_connection_url = 'test_url' - FLAGS.xenapi_connection_password = 'test_pass' + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) + self.flags(target_host='127.0.0.1', + xenapi_connection_url='test_url', + xenapi_connection_password='test_pass') 
db_fakes.stub_out_db_instance_api(self.stubs) stubs.stub_out_get_target(self.stubs) xenapi_fake.reset() self.values = {'id': 1, - 'project_id': 'fake', + 'project_id': self.user_id, 'user_id': 'fake', 'image_ref': 1, 'kernel_id': 2, @@ -169,14 +170,14 @@ def reset_network(*args): pass +def _find_rescue_vbd_ref(*args): + pass + + class XenAPIVMTestCase(test.TestCase): """Unit tests for VM operations.""" def setUp(self): super(XenAPIVMTestCase, self).setUp() - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') self.network = utils.import_object(FLAGS.network_manager) self.stubs = stubout.StubOutForTesting() self.flags(xenapi_connection_url='test_url', @@ -192,10 +193,14 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_stream_disk(self.stubs) stubs.stubout_is_vdi_pv(self.stubs) self.stubs.Set(vmops.VMOps, 'reset_network', reset_network) + self.stubs.Set(vmops.VMOps, '_find_rescue_vbd_ref', + _find_rescue_vbd_ref) stubs.stub_out_vm_methods(self.stubs) glance_stubs.stubout_glance_client(self.stubs) fake_utils.stub_out_utils_execute(self.stubs) - self.context = context.RequestContext('fake', 'fake', False) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) def test_parallel_builds(self): @@ -227,10 +232,10 @@ class XenAPIVMTestCase(test.TestCase): 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] instance = db.instance_create(self.context, values) - self.conn.spawn(instance, network_info) + self.conn.spawn(self.context, instance, network_info) - gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id) - gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id) + gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id) + gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id) gt1.wait() gt2.wait() @@ -257,14 +262,15 @@ class XenAPIVMTestCase(test.TestCase): instance = self._create_instance() name = "MySnapshot" - self.assertRaises(exception.Error, self.conn.snapshot, instance, name) + self.assertRaises(exception.Error, self.conn.snapshot, + self.context, instance, name) def test_instance_snapshot(self): stubs.stubout_instance_snapshot(self.stubs) instance = self._create_instance() name = "MySnapshot" - template_vm_ref = self.conn.snapshot(instance, name) + template_vm_ref = self.conn.snapshot(self.context, instance, name) def ensure_vm_was_torn_down(): vm_labels = [] @@ -397,12 +403,12 @@ class XenAPIVMTestCase(test.TestCase): instance_type_id="3", os_type="linux", architecture="x86-64", instance_id=1, check_injection=False, - create_record=True): + create_record=True, empty_dns=False): stubs.stubout_loopingcall_start(self.stubs) if create_record: values = {'id': instance_id, - 'project_id': self.project.id, - 'user_id': self.user.id, + 'project_id': self.project_id, + 'user_id': self.user_id, 'image_ref': image_ref, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, @@ -426,14 +432,23 @@ class XenAPIVMTestCase(test.TestCase): 'label': 'fake', 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] - self.conn.spawn(instance, network_info) + if empty_dns: + network_info[0][1]['dns'] = [] + + self.conn.spawn(self.context, instance, network_info) self.create_vm_record(self.conn, os_type, instance_id) self.check_vm_record(self.conn, check_injection) self.assertTrue(instance.os_type) self.assertTrue(instance.architecture) + def 
test_spawn_empty_dns(self): + """"Test spawning with an empty dns list""" + self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, + os_type="linux", architecture="x86-64", + empty_dns=True) + self.check_vm_params_for_linux() + def test_spawn_not_enough_memory(self): - FLAGS.xenapi_image_service = 'glance' self.assertRaises(Exception, self._test_spawn, 1, 2, 3, "4") # m1.xlarge @@ -445,7 +460,6 @@ class XenAPIVMTestCase(test.TestCase): """ vdi_recs_start = self._list_vdis() - FLAGS.xenapi_image_service = 'glance' stubs.stubout_fetch_image_glance_disk(self.stubs) self.assertRaises(xenapi_fake.Failure, self._test_spawn, 1, 2, 3) @@ -460,7 +474,6 @@ class XenAPIVMTestCase(test.TestCase): """ vdi_recs_start = self._list_vdis() - FLAGS.xenapi_image_service = 'glance' stubs.stubout_create_vm(self.stubs) self.assertRaises(xenapi_fake.Failure, self._test_spawn, 1, 2, 3) @@ -468,22 +481,12 @@ class XenAPIVMTestCase(test.TestCase): vdi_recs_end = self._list_vdis() self._check_vdis(vdi_recs_start, vdi_recs_end) - def test_spawn_raw_objectstore(self): - FLAGS.xenapi_image_service = 'objectstore' - self._test_spawn(1, None, None) - - def test_spawn_objectstore(self): - FLAGS.xenapi_image_service = 'objectstore' - self._test_spawn(1, 2, 3) - @stub_vm_utils_with_vdi_attached_here def test_spawn_raw_glance(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None) self.check_vm_params_for_linux() def test_spawn_vhd_glance_linux(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, os_type="linux", architecture="x86-64") self.check_vm_params_for_linux() @@ -512,20 +515,17 @@ class XenAPIVMTestCase(test.TestCase): self.assertEqual(len(self.vm['VBDs']), 1) def test_spawn_vhd_glance_windows(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, os_type="windows", architecture="i386") self.check_vm_params_for_windows() def test_spawn_glance(self): - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK) self.check_vm_params_for_linux_with_external_kernel() def test_spawn_netinject_file(self): - FLAGS.xenapi_image_service = 'glance' db_fakes.stub_out_db_instance_api(self.stubs, injected=True) self._tee_executed = False @@ -551,7 +551,6 @@ class XenAPIVMTestCase(test.TestCase): # Capture the sudo tee .../etc/network/interfaces command (r'(sudo\s+)?tee.*interfaces', _tee_handler), ]) - FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK, @@ -559,7 +558,6 @@ class XenAPIVMTestCase(test.TestCase): self.assertTrue(self._tee_executed) def test_spawn_netinject_xenstore(self): - FLAGS.xenapi_image_service = 'glance' db_fakes.stub_out_db_instance_api(self.stubs, injected=True) self._tee_executed = False @@ -604,7 +602,7 @@ class XenAPIVMTestCase(test.TestCase): self.assertFalse(self._tee_executed) def test_spawn_vlanmanager(self): - self.flags(xenapi_image_service='glance', + self.flags(image_service='nova.image.glance.GlanceImageService', network_manager='nova.network.manager.VlanManager', vlan_interface='fake0') @@ -626,7 +624,7 @@ class XenAPIVMTestCase(test.TestCase): host=FLAGS.host, vpn=None, instance_type_id=1, - project_id=self.project.id) + project_id=self.project_id) 
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK, @@ -648,7 +646,7 @@ class XenAPIVMTestCase(test.TestCase): self.flags(flat_injected=False) instance = self._create_instance() conn = xenapi_conn.get_connection(False) - conn.rescue(instance, None, []) + conn.rescue(self.context, instance, None, []) def test_unrescue(self): instance = self._create_instance() @@ -656,21 +654,13 @@ class XenAPIVMTestCase(test.TestCase): # Ensure that it will not unrescue a non-rescued instance. self.assertRaises(Exception, conn.unrescue, instance, None) - def tearDown(self): - super(XenAPIVMTestCase, self).tearDown() - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - self.vm_info = None - self.vm = None - self.stubs.UnsetAll() - def _create_instance(self, instance_id=1, spawn=True): """Creates and spawns a test instance.""" stubs.stubout_loopingcall_start(self.stubs) values = { 'id': instance_id, - 'project_id': self.project.id, - 'user_id': self.user.id, + 'project_id': self.project_id, + 'user_id': self.user_id, 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, @@ -693,7 +683,7 @@ class XenAPIVMTestCase(test.TestCase): 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] if spawn: - self.conn.spawn(instance, network_info) + self.conn.spawn(self.context, instance, network_info) return instance @@ -745,21 +735,19 @@ class XenAPIMigrateInstance(test.TestCase): def setUp(self): super(XenAPIMigrateInstance, self).setUp() self.stubs = stubout.StubOutForTesting() - FLAGS.target_host = '127.0.0.1' - FLAGS.xenapi_connection_url = 'test_url' - FLAGS.xenapi_connection_password = 'test_pass' + self.flags(target_host='127.0.0.1', + xenapi_connection_url='test_url', + xenapi_connection_password='test_pass') db_fakes.stub_out_db_instance_api(self.stubs) stubs.stub_out_get_target(self.stubs) xenapi_fake.reset() xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('fake', 'fake', 'fake', - admin=True) - self.project = self.manager.create_project('fake', 'fake', 'fake') - self.context = context.RequestContext('fake', 'fake', False) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) self.values = {'id': 1, - 'project_id': self.project.id, - 'user_id': self.user.id, + 'project_id': self.project_id, + 'user_id': self.user_id, 'image_ref': 1, 'kernel_id': None, 'ramdisk_id': None, @@ -773,20 +761,107 @@ class XenAPIMigrateInstance(test.TestCase): stubs.stubout_get_this_vm_uuid(self.stubs) glance_stubs.stubout_glance_client(self.stubs) - def tearDown(self): - super(XenAPIMigrateInstance, self).tearDown() - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - self.stubs.UnsetAll() - def test_migrate_disk_and_power_off(self): instance = db.instance_create(self.context, self.values) stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) conn = xenapi_conn.get_connection(False) conn.migrate_disk_and_power_off(instance, '127.0.0.1') - def test_finish_resize(self): + def test_revert_migrate(self): + instance = db.instance_create(self.context, self.values) + self.called = False + self.fake_vm_start_called = False + self.fake_revert_migration_called = False + + def fake_vm_start(*args, **kwargs): + self.fake_vm_start_called = True + + def fake_vdi_resize(*args, **kwargs): + self.called = True + + def fake_revert_migration(*args, 
**kwargs): + self.fake_revert_migration_called = True + + self.stubs.Set(stubs.FakeSessionForMigrationTests, + "VDI_resize_online", fake_vdi_resize) + self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) + self.stubs.Set(vmops.VMOps, 'revert_migration', fake_revert_migration) + + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + stubs.stubout_loopingcall_start(self.stubs) + conn = xenapi_conn.get_connection(False) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + conn.finish_migration(self.context, instance, + dict(base_copy='hurr', cow='durr'), + network_info, resize_instance=True) + self.assertEqual(self.called, True) + self.assertEqual(self.fake_vm_start_called, True) + + conn.revert_migration(instance) + self.assertEqual(self.fake_revert_migration_called, True) + + def test_finish_migrate(self): + instance = db.instance_create(self.context, self.values) + self.called = False + self.fake_vm_start_called = False + + def fake_vm_start(*args, **kwargs): + self.fake_vm_start_called = True + + def fake_vdi_resize(*args, **kwargs): + self.called = True + + self.stubs.Set(stubs.FakeSessionForMigrationTests, + "VDI_resize_online", fake_vdi_resize) + self.stubs.Set(vmops.VMOps, '_start', fake_vm_start) + + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + stubs.stubout_loopingcall_start(self.stubs) + conn = xenapi_conn.get_connection(False) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + conn.finish_migration(self.context, instance, + dict(base_copy='hurr', cow='durr'), + network_info, resize_instance=True) + self.assertEqual(self.called, True) + self.assertEqual(self.fake_vm_start_called, True) + + def test_finish_migrate_no_local_storage(self): + tiny_type_id = \ + instance_types.get_instance_type_by_name('m1.tiny')['id'] + self.values.update({'instance_type_id': tiny_type_id, 'local_gb': 0}) instance = db.instance_create(self.context, self.values) + + def fake_vdi_resize(*args, **kwargs): + raise Exception("This shouldn't be called") + + self.stubs.Set(stubs.FakeSessionForMigrationTests, + "VDI_resize_online", fake_vdi_resize) stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) stubs.stubout_loopingcall_start(self.stubs) conn = xenapi_conn.get_connection(False) @@ -804,8 +879,56 @@ class XenAPIMigrateInstance(test.TestCase): 'label': 'fake', 'mac': 'DE:AD:BE:EF:00:00', 'rxtx_cap': 3})] - conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'), - network_info) + conn.finish_migration(self.context, instance, + dict(base_copy='hurr', cow='durr'), + network_info, resize_instance=True) + + def test_finish_migrate_no_resize_vdi(self): + instance = db.instance_create(self.context, self.values) + + def fake_vdi_resize(*args, **kwargs): + raise Exception("This shouldn't be called") + + 
self.stubs.Set(stubs.FakeSessionForMigrationTests, + "VDI_resize_online", fake_vdi_resize) + stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) + stubs.stubout_loopingcall_start(self.stubs) + conn = xenapi_conn.get_connection(False) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + + # Resize instance would be determined by the compute call + conn.finish_migration(self.context, instance, + dict(base_copy='hurr', cow='durr'), + network_info, resize_instance=False) + + +class XenAPIImageTypeTestCase(test.TestCase): + """Test ImageType class.""" + + def test_to_string(self): + """Can convert from type id to type string.""" + self.assertEquals( + vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL), + vm_utils.ImageType.KERNEL_STR) + + def test_from_string(self): + """Can convert from string to type id.""" + self.assertEquals( + vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR), + vm_utils.ImageType.KERNEL) class XenAPIDetermineDiskImageTestCase(test.TestCase): @@ -829,7 +952,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): def test_instance_disk(self): """If a kernel is specified, the image type is DISK (aka machine).""" - FLAGS.xenapi_image_service = 'objectstore' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL self.assert_disk_type(vm_utils.ImageType.DISK) @@ -839,7 +961,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): If the kernel isn't specified, and we're not using Glance, then DISK_RAW is assumed. """ - FLAGS.xenapi_image_service = 'objectstore' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -849,7 +970,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): If we're using Glance, then defer to the image_type field, which in this case will be 'raw'. """ - FLAGS.xenapi_image_service = 'glance' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -859,7 +979,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): If we're using Glance, then defer to the image_type field, which in this case will be 'vhd'. 
""" - FLAGS.xenapi_image_service = 'glance' self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py index a943fee27..9efa23015 100644 --- a/nova/tests/test_zones.py +++ b/nova/tests/test_zones.py @@ -18,7 +18,6 @@ Tests For ZoneManager import datetime import mox -import novaclient from nova import context from nova import db diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 66c79d465..0d0f84e32 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -28,8 +28,8 @@ from nova import utils def stubout_instance_snapshot(stubs): @classmethod - def fake_fetch_image(cls, session, instance_id, image, user, project, - type): + def fake_fetch_image(cls, context, session, instance_id, image, user, + project, type): from nova.virt.xenapi.fake import create_vdi name_label = "instance-%s" % instance_id #TODO: create fake SR record @@ -227,7 +227,7 @@ def stub_out_vm_methods(stubs): def fake_release_bootlock(self, vm): pass - def fake_spawn_rescue(self, inst): + def fake_spawn_rescue(self, context, inst, network_info): inst._rescue = False stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown) diff --git a/nova/twistd.py b/nova/twistd.py deleted file mode 100644 index 15cf67825..000000000 --- a/nova/twistd.py +++ /dev/null @@ -1,267 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Twisted daemon helpers, specifically to parse out gFlags from twisted flags, -manage pid files and support syslogging. 
-""" - -import gflags -import os -import signal -import sys -import time -from twisted.scripts import twistd -from twisted.python import log -from twisted.python import reflect -from twisted.python import runtime -from twisted.python import usage - -from nova import flags -from nova import log as logging - - -if runtime.platformType == "win32": - from twisted.scripts._twistw import ServerOptions -else: - from twisted.scripts._twistd_unix import ServerOptions - - -FLAGS = flags.FLAGS - - -class TwistdServerOptions(ServerOptions): - def parseArgs(self, *args): - return - - -class FlagParser(object): - # this is a required attribute for gflags - syntactic_help = '' - - def __init__(self, parser): - self.parser = parser - - def Parse(self, s): - return self.parser(s) - - -def WrapTwistedOptions(wrapped): - class TwistedOptionsToFlags(wrapped): - subCommands = None - - def __init__(self): - # NOTE(termie): _data exists because Twisted stuff expects - # to be able to set arbitrary things that are - # not actual flags - self._data = {} - self._flagHandlers = {} - self._paramHandlers = {} - - # Absorb the twistd flags into our FLAGS - self._absorbFlags() - self._absorbParameters() - self._absorbHandlers() - - wrapped.__init__(self) - - def _absorbFlags(self): - twistd_flags = [] - reflect.accumulateClassList(self.__class__, 'optFlags', - twistd_flags) - for flag in twistd_flags: - key = flag[0].replace('-', '_') - if hasattr(FLAGS, key): - continue - flags.DEFINE_boolean(key, None, str(flag[-1])) - - def _absorbParameters(self): - twistd_params = [] - reflect.accumulateClassList(self.__class__, 'optParameters', - twistd_params) - for param in twistd_params: - key = param[0].replace('-', '_') - if hasattr(FLAGS, key): - continue - if len(param) > 4: - flags.DEFINE(FlagParser(param[4]), - key, param[2], str(param[3]), - serializer=gflags.ArgumentSerializer()) - else: - flags.DEFINE_string(key, param[2], str(param[3])) - - def _absorbHandlers(self): - twistd_handlers = {} - reflect.addMethodNamesToDict(self.__class__, twistd_handlers, - "opt_") - - # NOTE(termie): Much of the following is derived/copied from - # twisted.python.usage with the express purpose of - # providing compatibility - for name in twistd_handlers.keys(): - method = getattr(self, 'opt_' + name) - - takesArg = not usage.flagFunction(method, name) - doc = getattr(method, '__doc__', None) - if not doc: - doc = 'undocumented' - - if not takesArg: - if name not in FLAGS: - flags.DEFINE_boolean(name, None, doc) - self._flagHandlers[name] = method - else: - if name not in FLAGS: - flags.DEFINE_string(name, None, doc) - self._paramHandlers[name] = method - - def _doHandlers(self): - for flag, handler in self._flagHandlers.iteritems(): - if self[flag]: - handler() - for param, handler in self._paramHandlers.iteritems(): - if self[param] is not None: - handler(self[param]) - - def __str__(self): - return str(FLAGS) - - def parseOptions(self, options=None): - if options is None: - options = sys.argv - else: - options.insert(0, '') - - args = FLAGS(options) - logging.setup() - argv = args[1:] - # ignore subcommands - - try: - self.parseArgs(*argv) - except TypeError: - raise usage.UsageError(_("Wrong number of arguments.")) - - self.postOptions() - return args - - def parseArgs(self, *args): - # TODO(termie): figure out a decent way of dealing with args - #return - wrapped.parseArgs(self, *args) - - def postOptions(self): - self._doHandlers() - - wrapped.postOptions(self) - - def __getitem__(self, key): - key = key.replace('-', '_') - try: - 
return getattr(FLAGS, key) - except (AttributeError, KeyError): - return self._data[key] - - def __setitem__(self, key, value): - key = key.replace('-', '_') - try: - return setattr(FLAGS, key, value) - except (AttributeError, KeyError): - self._data[key] = value - - def get(self, key, default): - key = key.replace('-', '_') - try: - return getattr(FLAGS, key) - except (AttributeError, KeyError): - self._data.get(key, default) - - return TwistedOptionsToFlags - - -def stop(pidfile): - """ - Stop the daemon - """ - # Get the pid from the pidfile - try: - pf = file(pidfile, 'r') - pid = int(pf.read().strip()) - pf.close() - except IOError: - pid = None - - if not pid: - message = _("pidfile %s does not exist. Daemon not running?\n") - sys.stderr.write(message % pidfile) - # Not an error in a restart - return - - # Try killing the daemon process - try: - while 1: - os.kill(pid, signal.SIGKILL) - time.sleep(0.1) - except OSError, err: - err = str(err) - if err.find(_("No such process")) > 0: - if os.path.exists(pidfile): - os.remove(pidfile) - else: - print str(err) - sys.exit(1) - - -def serve(filename): - logging.debug(_("Serving %s") % filename) - name = os.path.basename(filename) - OptionsClass = WrapTwistedOptions(TwistdServerOptions) - options = OptionsClass() - argv = options.parseOptions() - FLAGS.python = filename - FLAGS.no_save = True - if not FLAGS.pidfile: - FLAGS.pidfile = '%s.pid' % name - elif FLAGS.pidfile.endswith('twistd.pid'): - FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - if not FLAGS.prefix: - FLAGS.prefix = name - elif FLAGS.prefix.endswith('twisted'): - FLAGS.prefix = FLAGS.prefix.replace('twisted', name) - - action = 'start' - if len(argv) > 1: - action = argv.pop() - - if action == 'stop': - stop(FLAGS.pidfile) - sys.exit() - elif action == 'restart': - stop(FLAGS.pidfile) - elif action == 'start': - pass - else: - print 'usage: %s [options] [start|stop|restart]' % argv[0] - sys.exit(1) - - logging.debug(_("Full set of FLAGS:")) - for flag in FLAGS: - logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) - - logging.audit(_("Starting %s"), name) - twistd.runApp(options) diff --git a/nova/utils.py b/nova/utils.py index 8784a227d..372358b42 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -19,7 +19,6 @@ """Utilities and helper functions.""" -import base64 import datetime import functools import inspect @@ -30,7 +29,6 @@ import os import random import re import socket -import string import struct import sys import time @@ -50,7 +48,8 @@ from nova import version LOG = logging.getLogger("nova.utils") -TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" +PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" FLAGS = flags.FLAGS @@ -127,6 +126,22 @@ def fetchfile(url, target): def execute(*cmd, **kwargs): + """ + Helper method to execute command with optional retry. + + :cmd Passed to subprocess.Popen. + :process_input Send to opened process. + :addl_env Added to the processes env. + :check_exit_code Defaults to 0. Raise exception.ProcessExecutionError + unless program exits with this code. + :delay_on_retry True | False. Defaults to True. If set to True, wait a + short amount of time before retrying. + :attempts How many times to retry cmd. 
+ + :raises exception.Error on receiving unknown arguments + :raises exception.ProcessExecutionError + """ + process_input = kwargs.pop('process_input', None) addl_env = kwargs.pop('addl_env', None) check_exit_code = kwargs.pop('check_exit_code', 0) @@ -224,7 +239,7 @@ def abspath(s): def novadir(): import nova - return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0] + return os.path.abspath(nova.__file__).split('nova/__init__.py')[0] def default_flagfile(filename='nova.conf', args=None): @@ -361,16 +376,26 @@ def clear_time_override(): utcnow.override_time = None -def isotime(at=None): - """Returns iso formatted utcnow.""" +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" if not at: at = utcnow() - return at.strftime(TIME_FORMAT) + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def isotime(at=None): + """Returns iso formatted utcnow.""" + return strtime(at, ISO_TIME_FORMAT) def parse_isotime(timestr): """Turn an iso formatted time back into a datetime.""" - return datetime.datetime.strptime(timestr, TIME_FORMAT) + return parse_strtime(timestr, ISO_TIME_FORMAT) def parse_mailmap(mailmap='.mailmap'): @@ -504,25 +529,61 @@ def utf8(value): return value -def to_primitive(value): - if type(value) is type([]) or type(value) is type((None,)): - o = [] - for v in value: - o.append(to_primitive(v)) - return o - elif type(value) is type({}): - o = {} - for k, v in value.iteritems(): - o[k] = to_primitive(v) - return o - elif isinstance(value, datetime.datetime): - return str(value) - elif hasattr(value, 'iteritems'): - return to_primitive(dict(value.iteritems())) - elif hasattr(value, '__iter__'): - return to_primitive(list(value)) - else: - return value +def to_primitive(value, convert_instances=False, level=0): + """Convert a complex object into primitives. + + Handy for JSON serialization. We can optionally handle instances, + but since this is a recursive function, we could have cyclical + data structures. + + To handle cyclical data structures we could track the actual objects + visited in a set, but not all objects are hashable. Instead we just + track the depth of the object inspections and don't go too deep. + + Therefore, convert_instances=True is lossy ... be aware. + + """ + if inspect.isclass(value): + return unicode(value) + + if level > 3: + return [] + + # The try block may not be necessary after the class check above, + # but just in case ... + try: + if type(value) is type([]) or type(value) is type((None,)): + o = [] + for v in value: + o.append(to_primitive(v, convert_instances=convert_instances, + level=level)) + return o + elif type(value) is type({}): + o = {} + for k, v in value.iteritems(): + o[k] = to_primitive(v, convert_instances=convert_instances, + level=level) + return o + elif isinstance(value, datetime.datetime): + return str(value) + elif hasattr(value, 'iteritems'): + return to_primitive(dict(value.iteritems()), + convert_instances=convert_instances, + level=level) + elif hasattr(value, '__iter__'): + return to_primitive(list(value), convert_instances=convert_instances, level=level) + elif convert_instances and hasattr(value, '__dict__'): + # Likely an instance of something. Watch for cycles. + # Ignore class member vars.
+ return to_primitive(value.__dict__, + convert_instances=convert_instances, + level=level + 1) + else: + return value + except TypeError, e: + # Class objects are tricky since they may define something like + # __iter__ defined but it isn't callable as list(). + return unicode(value) def dumps(value): @@ -745,7 +806,7 @@ def parse_server_string(server_str): (address, port) = server_str.split(':') return (address, port) - except: + except Exception: LOG.debug(_('Invalid server_string: %s' % server_str)) return ('', '') diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 34dc5f544..df4a66ac2 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -32,6 +32,33 @@ class InstanceInfo(object): self.state = state +def block_device_info_get_root(block_device_info): + block_device_info = block_device_info or {} + return block_device_info.get('root_device_name') + + +def block_device_info_get_swap(block_device_info): + block_device_info = block_device_info or {} + return block_device_info.get('swap') or {'device_name': None, + 'swap_size': 0} + + +def swap_is_usable(swap): + return swap and swap['device_name'] and swap['swap_size'] > 0 + + +def block_device_info_get_ephemerals(block_device_info): + block_device_info = block_device_info or {} + ephemerals = block_device_info.get('ephemerals') or [] + return ephemerals + + +def block_device_info_get_mapping(block_device_info): + block_device_info = block_device_info or {} + block_device_mapping = block_device_info.get('block_device_mapping') or [] + return block_device_mapping + + class ComputeDriver(object): """Base class for compute drivers. @@ -40,6 +67,7 @@ class ComputeDriver(object): def init_host(self, host): """Adopt existing VM's running here""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_info(self, instance_name): @@ -52,16 +80,20 @@ class ComputeDriver(object): :num_cpu: (int) the number of virtual CPUs for the domain :cpu_time: (int) the CPU time used in nanoseconds """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def list_instances(self): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def list_instances_detail(self): """Return a list of InstanceInfo for all registered VMs""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """Launch a VM for the specified instance""" raise NotImplementedError() @@ -79,29 +111,36 @@ class ComputeDriver(object): warning in that case. 
""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def reboot(self, instance, network_info): """Reboot specified VM""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def snapshot_instance(self, context, instance_id, image_id): raise NotImplementedError() def get_console_pool_info(self, console_type): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_console_output(self, instance): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_ajax_console(self, instance): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_diagnostics(self, instance): """Return data about VM diagnostics""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_host_ip_addr(self): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def attach_volume(self, context, instance_id, volume_id, mountpoint): @@ -116,42 +155,50 @@ class ComputeDriver(object): def migrate_disk_and_power_off(self, instance, dest): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def snapshot(self, instance, image_id): + def snapshot(self, context, instance, image_id): """Create snapshot from a running VM instance.""" raise NotImplementedError() - def finish_resize(self, instance, disk_info): + def finish_migration(self, context, instance, disk_info, network_info, + resize_instance): """Completes a resize, turning on the migrated instance""" raise NotImplementedError() - def revert_resize(self, instance): + def revert_migration(self, instance): """Reverts a resize, powering back on the instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def pause(self, instance, callback): """Pause VM instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unpause(self, instance, callback): """Unpause paused VM instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def suspend(self, instance, callback): """suspend the specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def resume(self, instance, callback): """resume the specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, callback, network_info): """Rescue the specified instance""" raise NotImplementedError() def unrescue(self, instance, callback, network_info): """Unrescue the specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def update_available_resource(self, ctxt, host): @@ -164,6 +211,7 @@ class ComputeDriver(object): :param host: hostname that compute manager is currently running """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def live_migration(self, ctxt, instance_ref, dest, @@ -183,20 +231,25 @@ class ComputeDriver(object): expected nova.compute.manager.recover_live_migration. 
""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_security_group_rules(self, security_group_id): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_security_group_members(self, security_group_id): + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_provider_fw_rules(self, security_group_id): """See: nova/virt/fake.py for docs.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def reset_network(self, instance): """reset networking for specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token pass def ensure_filtering_rules_for_instance(self, instance_ref): @@ -222,10 +275,12 @@ class ComputeDriver(object): :params instance_ref: nova.db.sqlalchemy.models.Instance object """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unfilter_instance(self, instance, network_info): """Stop filtering instance""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def set_admin_password(self, context, instance_id, new_pass=None): @@ -236,24 +291,34 @@ class ComputeDriver(object): """Create a file on the VM instance. The file path and contents should be base64-encoded. """ + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def agent_update(self, instance, url, md5hash): """Update agent on the VM instance.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def inject_network_info(self, instance, nw_info): """inject network info for specified instance""" + # TODO(Vek): Need to pass context in for access to auth_token pass def poll_rescued_instances(self, timeout): """Poll for rescued instances""" + # TODO(Vek): Need to pass context in for access to auth_token + raise NotImplementedError() + + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" raise NotImplementedError() def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def plug_vifs(self, instance, network_info): """Plugs in VIFs to networks.""" + # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 26bc421c0..93c54a27d 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -129,7 +129,8 @@ class FakeConnection(driver.ComputeDriver): info_list.append(self._map_to_instance_info(instance)) return info_list - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """ Create a new instance/VM/domain on the virtualization platform. @@ -153,7 +154,7 @@ class FakeConnection(driver.ComputeDriver): fake_instance = FakeInstance(name, state) self.instances[name] = fake_instance - def snapshot(self, instance, name): + def snapshot(self, context, instance, name): """ Snapshots the specified instance. @@ -240,7 +241,7 @@ class FakeConnection(driver.ComputeDriver): """ pass - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, callback, network_info): """ Rescue the specified instance. 
""" @@ -340,8 +341,7 @@ class FakeConnection(driver.ComputeDriver): only useful for giving back to this layer as a parameter to disk_stats). These IDs only need to be unique for a given instance. - Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return ['A_DISK'] @@ -353,8 +353,7 @@ class FakeConnection(driver.ComputeDriver): interface_stats). These IDs only need to be unique for a given instance. - Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return ['A_VIF'] @@ -374,8 +373,7 @@ class FakeConnection(driver.ComputeDriver): having to do the aggregation. On those platforms, this method is unused. - Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return [0L, 0L, 0L, 0L, None] @@ -395,8 +393,7 @@ class FakeConnection(driver.ComputeDriver): having to do the aggregation. On those platforms, this method is unused. - Note that this function takes an instance ID, not a - compute.service.Instance, so that it can be called by compute.monitor. + Note that this function takes an instance ID. """ return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] @@ -515,6 +512,10 @@ class FakeConnection(driver.ComputeDriver): """Return fake Host Status of ram, disk, network.""" return self.host_status + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + pass + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" pass diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 81c7dea58..43658a6c2 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -66,7 +66,6 @@ import time from nova import exception from nova import flags from nova import log as logging -from nova.auth import manager from nova.compute import power_state from nova.virt import driver from nova.virt import images @@ -139,19 +138,19 @@ class HyperVConnection(driver.ComputeDriver): return instance_infos - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: raise exception.InstanceExists(name=instance.name) - user = manager.AuthManager().get_user(instance['user_id']) - project = manager.AuthManager().get_project(instance['project_id']) #Fetch the file, assume it is a VHD file. base_vhd_filename = os.path.join(FLAGS.instances_path, instance.name) vhdfile = "%s.vhd" % (base_vhd_filename) - images.fetch(instance['image_ref'], vhdfile, user, project) + images.fetch(instance['image_ref'], vhdfile, + instance['user_id'], instance['project_id']) try: self._create_vm(instance) @@ -500,6 +499,10 @@ class HyperVConnection(driver.ComputeDriver): """See xenapi_conn.py implementation.""" pass + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + pass + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" pass diff --git a/nova/virt/images.py b/nova/virt/images.py index 40bf6107c..54c691a40 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -21,7 +21,6 @@ Handling of VM disk images. 
""" -from nova import context from nova import flags from nova.image import glance as glance_image_service import nova.image @@ -33,13 +32,12 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.virt.images') -def fetch(image_href, path, _user, _project): +def fetch(context, image_href, path, _user_id, _project_id): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. (image_service, image_id) = nova.image.get_image_service(image_href) with open(path, "wb") as image_file: - elevated = context.get_admin_context() - metadata = image_service.get(elevated, image_id, image_file) + metadata = image_service.get(context, image_id, image_file) return metadata diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index a75636390..210e2b0fb 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -3,24 +3,22 @@ <memory>${memory_kb}</memory> <os> #if $type == 'lxc' - #set $disk_prefix = '' #set $disk_bus = '' <type>exe</type> <init>/sbin/init</init> #else if $type == 'uml' - #set $disk_prefix = 'ubd' #set $disk_bus = 'uml' <type>uml</type> <kernel>/usr/bin/linux</kernel> - <root>/dev/ubda</root> + #set $root_device_name = $getVar('root_device_name', '/dev/ubda') + <root>${root_device_name}</root> #else #if $type == 'xen' - #set $disk_prefix = 'sd' #set $disk_bus = 'scsi' <type>linux</type> - <root>/dev/xvda</root> + #set $root_device_name = $getVar('root_device_name', '/dev/xvda') + <root>${root_device_name}</root> #else - #set $disk_prefix = 'vd' #set $disk_bus = 'virtio' <type>hvm</type> #end if @@ -33,7 +31,8 @@ #if $type == 'xen' <cmdline>ro</cmdline> #else - <cmdline>root=/dev/vda console=ttyS0</cmdline> + #set $root_device_name = $getVar('root_device_name', '/dev/vda') + <cmdline>root=${root_device_name} console=ttyS0</cmdline> #end if #if $getVar('ramdisk', None) <initrd>${ramdisk}</initrd> @@ -71,16 +70,30 @@ <disk type='file'> <driver type='${driver_type}'/> <source file='${basepath}/disk'/> - <target dev='${disk_prefix}a' bus='${disk_bus}'/> + <target dev='${root_device}' bus='${disk_bus}'/> </disk> #end if - #if $getVar('local', False) + #if $getVar('local_device', False) <disk type='file'> <driver type='${driver_type}'/> <source file='${basepath}/disk.local'/> - <target dev='${disk_prefix}b' bus='${disk_bus}'/> + <target dev='${local_device}' bus='${disk_bus}'/> </disk> #end if + #for $eph in $ephemerals + <disk type='block'> + <driver type='${driver_type}'/> + <source dev='${basepath}/${eph.device_path}'/> + <target dev='${eph.device}' bus='${disk_bus}'/> + </disk> + #end for + #if $getVar('swap_device', False) + <disk type='file'> + <driver type='${driver_type}'/> + <source file='${basepath}/disk.swap'/> + <target dev='${swap_device}' bus='${disk_bus}'/> + </disk> + #end if #for $vol in $volumes <disk type='${vol.type}'> <driver type='raw'/> diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index c27e92feb..16efa7292 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -54,7 +54,8 @@ from xml.etree import ElementTree from eventlet import greenthread from eventlet import tpool -from nova import context +from nova import block_device +from nova import context as nova_context from nova import db from nova import exception from nova import flags @@ -121,8 +122,6 @@ flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live 
migration behavior') flags.DEFINE_string('qemu_img', 'qemu-img', 'binary to use for qemu-img commands') -flags.DEFINE_bool('start_guests_on_host_boot', False, - 'Whether to restart guests when the host reboots') flags.DEFINE_string('libvirt_vif_type', 'bridge', 'Type of VIF to create.') flags.DEFINE_string('libvirt_vif_driver', @@ -153,8 +152,8 @@ def _late_load_cheetah(): Template = t.Template -def _strip_dev(mount_path): - return re.sub(r'^/dev/', '', mount_path) +def _get_eph_disk(ephemeral): + return 'disk.eph' + str(ephemeral['num']) class LibvirtConnection(driver.ComputeDriver): @@ -173,27 +172,8 @@ class LibvirtConnection(driver.ComputeDriver): self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver) def init_host(self, host): - # Adopt existing VM's running here - ctxt = context.get_admin_context() - for instance in db.instance_get_all_by_host(ctxt, host): - try: - LOG.debug(_('Checking state of %s'), instance['name']) - state = self.get_info(instance['name'])['state'] - except exception.NotFound: - state = power_state.SHUTOFF - - LOG.debug(_('Current state of %(name)s was %(state)s.'), - {'name': instance['name'], 'state': state}) - db.instance_set_state(ctxt, instance['id'], state) - - # NOTE(justinsb): We no longer delete SHUTOFF instances, - # the user may want to power them back on - - if state != power_state.RUNNING: - continue - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) - self.firewall_driver.apply_instance_filter(instance) + # NOTE(nsokolov): moved instance restarting to ComputeManager + pass def _get_connection(self): if not self._wrapped_conn or not self._test_connection(): @@ -370,7 +350,7 @@ class LibvirtConnection(driver.ComputeDriver): """Returns the xml for the disk mounted at device""" try: doc = libxml2.parseDoc(xml) - except: + except Exception: return None ctx = doc.xpathNewContext() try: @@ -396,7 +376,7 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom.detachDevice(xml) @exception.wrap_exception() - def snapshot(self, instance, image_href): + def snapshot(self, context, instance, image_href): """Create snapshot from a running VM instance. 
This command only works with qemu 0.14+, the qemu_img flag is @@ -405,18 +385,15 @@ class LibvirtConnection(driver.ComputeDriver): """ virt_dom = self._lookup_by_name(instance['name']) - elevated = context.get_admin_context() (image_service, image_id) = nova.image.get_image_service( instance['image_ref']) - base = image_service.show(elevated, image_id) + base = image_service.show(context, image_id) (snapshot_image_service, snapshot_image_id) = \ nova.image.get_image_service(image_href) - snapshot = snapshot_image_service.show(elevated, snapshot_image_id) + snapshot = snapshot_image_service.show(context, snapshot_image_id) - metadata = {'disk_format': base['disk_format'], - 'container_format': base['container_format'], - 'is_public': False, + metadata = {'is_public': False, 'status': 'active', 'name': snapshot['name'], 'properties': { @@ -431,6 +408,12 @@ class LibvirtConnection(driver.ComputeDriver): arch = base['properties']['architecture'] metadata['properties']['architecture'] = arch + if 'disk_format' in base: + metadata['disk_format'] = base['disk_format'] + + if 'container_format' in base: + metadata['container_format'] = base['container_format'] + # Make the snapshot snapshot_name = uuid.uuid4().hex snapshot_xml = """ @@ -463,7 +446,7 @@ class LibvirtConnection(driver.ComputeDriver): # Upload that image to the image service with open(out_path) as image_file: - image_service.update(elevated, + image_service.update(context, image_href, metadata, image_file) @@ -538,7 +521,7 @@ class LibvirtConnection(driver.ComputeDriver): dom.create() @exception.wrap_exception() - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, callback, network_info): """Loads a VM using rescue images. A rescue is normally performed when something goes wrong with the @@ -553,7 +536,7 @@ class LibvirtConnection(driver.ComputeDriver): rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, '.rescue', rescue_images) + self._create_image(context, instance, xml, '.rescue', rescue_images) self._create_new_domain(xml) def _wait_for_rescue(): @@ -592,23 +575,18 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): xml = self.to_xml(instance, False, network_info=network_info, - block_device_mapping=block_device_mapping) - block_device_mapping = block_device_mapping or [] + block_device_info=block_device_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) - self._create_image(instance, xml, network_info=network_info, - block_device_mapping=block_device_mapping) + self._create_image(context, instance, xml, network_info=network_info, + block_device_info=block_device_info) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) self.firewall_driver.apply_instance_filter(instance) - if FLAGS.start_guests_on_host_boot: - LOG.debug(_("instance %s: setting autostart ON") % - instance['name']) - domain.setAutostart(1) - def _wait_for_boot(): """Called at an interval until the VM is running.""" instance_name = instance['name'] @@ -769,9 +747,10 @@ class LibvirtConnection(driver.ComputeDriver): else: 
utils.execute('cp', base, target) - def _fetch_image(self, target, image_id, user, project, size=None): + def _fetch_image(self, context, target, image_id, user_id, project_id, + size=None): """Grab image and optionally attempt to resize it""" - images.fetch(image_id, target, user, project) + images.fetch(context, image_id, target, user_id, project_id) if size: disk.extend(target, size) @@ -780,10 +759,14 @@ class LibvirtConnection(driver.ComputeDriver): utils.execute('truncate', target, '-s', "%dG" % local_gb) # TODO(vish): should we format disk by default? - def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, - network_info=None, block_device_mapping=None): - block_device_mapping = block_device_mapping or [] + def _create_swap(self, target, swap_gb): + """Create a swap file of specified size""" + self._create_local(target, swap_gb) + utils.execute('mkswap', target) + def _create_image(self, context, inst, libvirt_xml, suffix='', + disk_images=None, network_info=None, + block_device_info=None): if not suffix: suffix = '' @@ -809,9 +792,6 @@ class LibvirtConnection(driver.ComputeDriver): os.close(os.open(basepath('console.log', ''), os.O_CREAT | os.O_WRONLY, 0660)) - user = manager.AuthManager().get_user(inst['user_id']) - project = manager.AuthManager().get_project(inst['project_id']) - if not disk_images: disk_images = {'image_id': inst['image_ref'], 'kernel_id': inst['kernel_id'], @@ -820,19 +800,21 @@ class LibvirtConnection(driver.ComputeDriver): if disk_images['kernel_id']: fname = '%08x' % int(disk_images['kernel_id']) self._cache_image(fn=self._fetch_image, + context=context, target=basepath('kernel'), fname=fname, image_id=disk_images['kernel_id'], - user=user, - project=project) + user_id=inst['user_id'], + project_id=inst['project_id']) if disk_images['ramdisk_id']: fname = '%08x' % int(disk_images['ramdisk_id']) self._cache_image(fn=self._fetch_image, + context=context, target=basepath('ramdisk'), fname=fname, image_id=disk_images['ramdisk_id'], - user=user, - project=project) + user_id=inst['user_id'], + project_id=inst['project_id']) root_fname = hashlib.sha1(disk_images['image_id']).hexdigest() size = FLAGS.minimum_root_size @@ -843,24 +825,50 @@ class LibvirtConnection(driver.ComputeDriver): size = None root_fname += "_sm" - if not self._volume_in_mapping(self.root_mount_device, - block_device_mapping): + if not self._volume_in_mapping(self.default_root_device, + block_device_info): self._cache_image(fn=self._fetch_image, + context=context, target=basepath('disk'), fname=root_fname, cow=FLAGS.use_cow_images, image_id=disk_images['image_id'], - user=user, - project=project, + user_id=inst['user_id'], + project_id=inst['project_id'], size=size) - if inst_type['local_gb'] and not self._volume_in_mapping( - self.local_mount_device, block_device_mapping): + local_gb = inst['local_gb'] + if local_gb and not self._volume_in_mapping( + self.default_local_device, block_device_info): self._cache_image(fn=self._create_local, target=basepath('disk.local'), - fname="local_%s" % inst_type['local_gb'], + fname="local_%s" % local_gb, + cow=FLAGS.use_cow_images, + local_gb=local_gb) + + for eph in driver.block_device_info_get_ephemerals(block_device_info): + self._cache_image(fn=self._create_local, + target=basepath(_get_eph_disk(eph)), + fname="local_%s" % eph['size'], cow=FLAGS.use_cow_images, - local_gb=inst_type['local_gb']) + local_gb=eph['size']) + + swap_gb = 0 + + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + 
swap_gb = swap['swap_size'] + elif (inst_type['swap'] > 0 and + not self._volume_in_mapping(self.default_swap_device, + block_device_info)): + swap_gb = inst_type['swap'] + + if swap_gb > 0: + self._cache_image(fn=self._create_swap, + target=basepath('disk.swap'), + fname="swap_%s" % swap_gb, + cow=FLAGS.use_cow_images, + swap_gb=swap_gb) # For now, we assume that if we're not using a kernel, we're using a # partitioned disk image where the target partition is the first @@ -882,7 +890,7 @@ class LibvirtConnection(driver.ComputeDriver): ifc_template = open(FLAGS.injected_network_template).read() ifc_num = -1 have_injected_networks = False - admin_context = context.get_admin_context() + admin_context = nova_context.get_admin_context() for (network_ref, mapping) in network_info: ifc_num += 1 @@ -904,7 +912,7 @@ class LibvirtConnection(driver.ComputeDriver): 'netmask': netmask, 'gateway': mapping['gateway'], 'broadcast': mapping['broadcast'], - 'dns': mapping['dns'], + 'dns': ' '.join(mapping['dns']), 'address_v6': address_v6, 'gateway6': gateway_v6, 'netmask_v6': netmask_v6} @@ -941,16 +949,35 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.libvirt_type == 'uml': utils.execute('sudo', 'chown', 'root', basepath('disk')) - root_mount_device = 'vda' # FIXME for now. it's hard coded. - local_mount_device = 'vdb' # FIXME for now. it's hard coded. - - def _volume_in_mapping(self, mount_device, block_device_mapping): - mount_device_ = _strip_dev(mount_device) - for vol in block_device_mapping: - vol_mount_device = _strip_dev(vol['mount_device']) - if vol_mount_device == mount_device_: - return True - return False + if FLAGS.libvirt_type == 'uml': + _disk_prefix = 'ubd' + elif FLAGS.libvirt_type == 'xen': + _disk_prefix = 'sd' + elif FLAGS.libvirt_type == 'lxc': + _disk_prefix = '' + else: + _disk_prefix = 'vd' + + default_root_device = _disk_prefix + 'a' + default_local_device = _disk_prefix + 'b' + default_swap_device = _disk_prefix + 'c' + + def _volume_in_mapping(self, mount_device, block_device_info): + block_device_list = [block_device.strip_dev(vol['mount_device']) + for vol in + driver.block_device_info_get_mapping( + block_device_info)] + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + block_device_list.append( + block_device.strip_dev(swap['device_name'])) + block_device_list += [block_device.strip_dev(ephemeral['device_name']) + for ephemeral in + driver.block_device_info_get_ephemerals( + block_device_info)] + + LOG.debug(_("block_device_list %s"), block_device_list) + return block_device.strip_dev(mount_device) in block_device_list def _get_volume_device_info(self, device_path): if device_path.startswith('/dev/'): @@ -962,8 +989,9 @@ class LibvirtConnection(driver.ComputeDriver): raise exception.InvalidDevicePath(path=device_path) def _prepare_xml_info(self, instance, rescue=False, network_info=None, - block_device_mapping=None): - block_device_mapping = block_device_mapping or [] + block_device_info=None): + block_device_mapping = driver.block_device_info_get_mapping( + block_device_info) # TODO(adiantum) remove network_info creation code # when multinics will be completed if not network_info: @@ -982,17 +1010,27 @@ class LibvirtConnection(driver.ComputeDriver): driver_type = 'raw' for vol in block_device_mapping: - vol['mount_device'] = _strip_dev(vol['mount_device']) + vol['mount_device'] = block_device.strip_dev(vol['mount_device']) (vol['type'], vol['protocol'], vol['name']) = \ 
self._get_volume_device_info(vol['device_path']) - ebs_root = self._volume_in_mapping(self.root_mount_device, - block_device_mapping) - if self._volume_in_mapping(self.local_mount_device, - block_device_mapping): - local_gb = False - else: - local_gb = inst_type['local_gb'] + ebs_root = self._volume_in_mapping(self.default_root_device, + block_device_info) + + local_device = False + if not (self._volume_in_mapping(self.default_local_device, + block_device_info) or + 0 in [eph['num'] for eph in + driver.block_device_info_get_ephemerals( + block_device_info)]): + if instance['local_gb'] > 0: + local_device = self.default_local_device + + ephemerals = [] + for eph in driver.block_device_info_get_ephemerals(block_device_info): + ephemerals.append({'device_path': _get_eph_disk(eph), + 'device': block_device.strip_dev( + eph['device_name'])}) xml_info = {'type': FLAGS.libvirt_type, 'name': instance['name'], @@ -1001,12 +1039,35 @@ class LibvirtConnection(driver.ComputeDriver): 'memory_kb': inst_type['memory_mb'] * 1024, 'vcpus': inst_type['vcpus'], 'rescue': rescue, - 'local': local_gb, + 'disk_prefix': self._disk_prefix, 'driver_type': driver_type, 'vif_type': FLAGS.libvirt_vif_type, 'nics': nics, 'ebs_root': ebs_root, - 'volumes': block_device_mapping} + 'local_device': local_device, + 'volumes': block_device_mapping, + 'ephemerals': ephemerals} + + root_device_name = driver.block_device_info_get_root(block_device_info) + if root_device_name: + xml_info['root_device'] = block_device.strip_dev(root_device_name) + xml_info['root_device_name'] = root_device_name + else: + # NOTE(yamahata): + # for nova.api.ec2.cloud.CloudController.get_metadata() + xml_info['root_device'] = self.default_root_device + db.instance_update( + nova_context.get_admin_context(), instance['id'], + {'root_device_name': '/dev/' + self.default_root_device}) + + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + xml_info['swap_device'] = block_device.strip_dev( + swap['device_name']) + elif (inst_type['swap'] > 0 and + not self._volume_in_mapping(self.default_swap_device, + block_device_info)): + xml_info['swap_device'] = self.default_swap_device if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'): xml_info['vncserver_host'] = FLAGS.vncserver_host @@ -1022,12 +1083,11 @@ class LibvirtConnection(driver.ComputeDriver): return xml_info def to_xml(self, instance, rescue=False, network_info=None, - block_device_mapping=None): - block_device_mapping = block_device_mapping or [] + block_device_info=None): # TODO(termie): cache? LOG.debug(_('instance %s: starting toXML method'), instance['name']) xml_info = self._prepare_xml_info(instance, rescue, network_info, - block_device_mapping) + block_device_info) xml = str(Template(self.libvirt_xml, searchList=[xml_info])) LOG.debug(_('instance %s: finished toXML method'), instance['name']) return xml @@ -1090,8 +1150,7 @@ class LibvirtConnection(driver.ComputeDriver): def get_disks(self, instance_name): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. Returns a list of all block devices for this domain. 
""" @@ -1102,7 +1161,7 @@ class LibvirtConnection(driver.ComputeDriver): try: doc = libxml2.parseDoc(xml) - except: + except Exception: return [] ctx = doc.xpathNewContext() @@ -1132,8 +1191,7 @@ class LibvirtConnection(driver.ComputeDriver): def get_interfaces(self, instance_name): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. Returns a list of all network interfaces for this instance. """ @@ -1144,7 +1202,7 @@ class LibvirtConnection(driver.ComputeDriver): try: doc = libxml2.parseDoc(xml) - except: + except Exception: return [] ctx = doc.xpathNewContext() @@ -1348,16 +1406,14 @@ class LibvirtConnection(driver.ComputeDriver): def block_stats(self, instance_name, disk): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. """ domain = self._lookup_by_name(instance_name) return domain.blockStats(disk) def interface_stats(self, instance_name, interface): """ - Note that this function takes an instance name, not an Instance, so - that it can be called by monitor. + Note that this function takes an instance name. """ domain = self._lookup_by_name(instance_name) return domain.interfaceStats(interface) @@ -1586,6 +1642,10 @@ class LibvirtConnection(driver.ComputeDriver): """See xenapi_conn.py implementation.""" pass + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + pass + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" pass diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index eef582fac..711b05bae 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -25,6 +25,7 @@ from nova.network import linux_net from nova.virt.libvirt import netutils from nova import utils from nova.virt.vif import VIFDriver +from nova import exception LOG = logging.getLogger('nova.virt.libvirt.vif') @@ -128,7 +129,7 @@ class LibvirtOpenVswitchDriver(VIFDriver): utils.execute('sudo', 'ovs-vsctl', 'del-port', network['bridge'], dev) utils.execute('sudo', 'ip', 'link', 'delete', dev) - except: + except exception.ProcessExecutionError: LOG.warning(_("Failed while unplugging vif of instance '%s'"), instance['name']) raise diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 1ee8fa1c0..07a6ba6ab 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -26,7 +26,7 @@ import urllib import urllib2
import uuid
-from nova import context
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
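The `nova_context` alias above keeps the context module importable in files whose methods now take a request-context argument named `context`. A minimal sketch of the pattern; the helper function here is illustrative and not part of this change:

    from nova import context as nova_context


    def pick_context(context):
        # "context" is the per-request context handed in by the caller;
        # the aliased module still provides the elevated admin context
        # for code paths that have no request to work from.
        return context or nova_context.get_admin_context()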
@@ -89,7 +89,7 @@ class VMWareVMOps(object):
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, instance, network_info):
+ def spawn(self, context, instance, network_info):
"""
Creates a VM instance.
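With the change above, the request context arrives as the first argument to spawn(), so lookups made while building the VM can run under the caller's credentials rather than a freshly elevated context. A rough stub of the convention; the class name and body are illustrative only:

    class ExampleVMOps(object):
        """Illustrative stub mirroring the spawn() signature above."""

        def spawn(self, context, instance, network_info):
            # Real drivers thread "context" into their image-service and
            # database calls while creating the VM.
            raise NotImplementedError()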
@@ -111,7 +111,7 @@ class VMWareVMOps(object):
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
net_name = network['bridge']
@@ -329,7 +329,7 @@ class VMWareVMOps(object):
LOG.debug(_("Powered on the VM instance %s") % instance.name)
_power_on_vm()
- def snapshot(self, instance, snapshot_name):
+ def snapshot(self, context, instance, snapshot_name):
"""
Create snapshot from a running VM instance.
Steps followed are:
@@ -721,11 +721,11 @@ class VMWareVMOps(object):
Set the machine id of the VM for guest tools to pick up and change
the IP.
"""
- admin_context = context.get_admin_context()
+ admin_context = nova_context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
mac_address = None
if instance['mac_addresses']:
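The set_machine_id path has no request context of its own, so it still looks up the VM and its network with an elevated context. A condensed sketch of that flow; `get_vm_ref` stands in for the driver's session helper and is not a real name from this change:

    from nova import context as nova_context
    from nova import db
    from nova import exception


    def vm_and_network(get_vm_ref, instance):
        """Sketch of the lookup flow above."""
        vm_ref = get_vm_ref(instance.name)
        if vm_ref is None:
            raise exception.InstanceNotFound(instance_id=instance.id)
        admin_context = nova_context.get_admin_context()
        return vm_ref, db.network_get_by_instance(admin_context,
                                                  instance['id'])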
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index ce57847b2..aaa384374 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,13 +124,14 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
"""Create VM instance."""
- self._vmops.spawn(instance, network_info)
+ self._vmops.spawn(context, instance, network_info)
- def snapshot(self, instance, name):
+ def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
- self._vmops.snapshot(instance, name)
+ self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info):
"""Reboot VM instance."""
@@ -190,6 +191,10 @@ class VMWareESXConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
return
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
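The VMware driver stubs host_power_action out, while the XenAPI driver later in this change maps the requested action onto a host plugin call and rejects anything it cannot perform. A simplified sketch of that dispatch; `call_plugin` is a stand-in for the driver's plugin helper:

    import json


    def host_power_action(call_plugin, host, action):
        """Sketch of the reboot/shutdown dispatch used by the XenAPI
        driver below; "startup" is rejected because the host runs on
        the same machine as the hypervisor."""
        methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"}
        if action not in methods:
            raise NotImplementedError("Host startup is not supported.")
        args = {"action": json.dumps(action)}
        json_resp = call_plugin(methods[action], args)
        return json.loads(json_resp)["power_action"]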
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 62863c6d8..6d2340ccd 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -37,7 +37,6 @@ import nova.image from nova.image import glance as glance_image_service from nova import log as logging from nova import utils -from nova.auth.manager import AuthManager from nova.compute import instance_types from nova.compute import power_state from nova.virt import disk @@ -85,38 +84,22 @@ class ImageType: DISK = 2 DISK_RAW = 3 DISK_VHD = 4 + _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD) KERNEL_STR = "kernel" RAMDISK_STR = "ramdisk" DISK_STR = "os" DISK_RAW_STR = "os_raw" DISK_VHD_STR = "vhd" + _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR) @classmethod def to_string(cls, image_type): - if image_type == ImageType.KERNEL: - return ImageType.KERNEL_STR - elif image_type == ImageType.RAMDISK: - return ImageType.RAMDISK_STR - elif image_type == ImageType.DISK: - return ImageType.DISK_STR - elif image_type == ImageType.DISK_RAW: - return ImageType.DISK_RAW_STR - elif image_type == ImageType.DISK_VHD: - return ImageType.VHD_STR + return dict(zip(ImageType._ids, ImageType._strs)).get(image_type) @classmethod def from_string(cls, image_type_str): - if image_type_str == ImageType.KERNEL_STR: - return ImageType.KERNEL - elif image_type == ImageType.RAMDISK_STR: - return ImageType.RAMDISK - elif image_type == ImageType.DISK_STR: - return ImageType.DISK - elif image_type == ImageType.DISK_RAW_STR: - return ImageType.DISK_RAW - elif image_type == ImageType.DISK_VHD_STR: - return ImageType.VHD + return dict(zip(ImageType._strs, ImageType._ids)).get(image_type_str) class VMHelper(HelperBase): @@ -359,7 +342,7 @@ class VMHelper(HelperBase): return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid) @classmethod - def upload_image(cls, session, instance, vdi_uuids, image_id): + def upload_image(cls, context, session, instance, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. """ @@ -377,37 +360,30 @@ class VMHelper(HelperBase): 'glance_host': glance_host, 'glance_port': glance_port, 'sr_path': cls.get_sr_path(session), - 'os_type': os_type} + 'os_type': os_type, + 'auth_token': getattr(context, 'auth_token', None)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) session.wait_for_task(task, instance.id) @classmethod - def fetch_image(cls, session, instance_id, image, user, project, - image_type): - """ - image_type is interpreted as an ImageType instance - Related flags: - xenapi_image_service = ['glance', 'objectstore'] - glance_address = 'address for glance services' - glance_port = 'port for glance services' + def fetch_image(cls, context, session, instance_id, image, user_id, + project_id, image_type): + """Fetch image from glance based on image type. 
- Returns: A single filename if image_type is KERNEL_RAMDISK + Returns: A single filename if image_type is KERNEL or RAMDISK A list of dictionaries that describe VDIs, otherwise """ - access = AuthManager().get_access_key(user, project) - - if FLAGS.xenapi_image_service == 'glance': - return cls._fetch_image_glance(session, instance_id, image, - access, image_type) + if image_type == ImageType.DISK_VHD: + return cls._fetch_image_glance_vhd(context, + session, instance_id, image, image_type) else: - return cls._fetch_image_objectstore(session, instance_id, image, - access, user.secret, - image_type) + return cls._fetch_image_glance_disk(context, + session, instance_id, image, image_type) @classmethod - def _fetch_image_glance_vhd(cls, session, instance_id, image, access, + def _fetch_image_glance_vhd(cls, context, session, instance_id, image, image_type): """Tell glance to download an image and put the VHDs into the SR @@ -429,7 +405,8 @@ class VMHelper(HelperBase): 'glance_host': glance_host, 'glance_port': glance_port, 'uuid_stack': uuid_stack, - 'sr_path': cls.get_sr_path(session)} + 'sr_path': cls.get_sr_path(session), + 'auth_token': getattr(context, 'auth_token', None)} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'download_vhd', kwargs) @@ -455,7 +432,7 @@ class VMHelper(HelperBase): return vdis @classmethod - def _fetch_image_glance_disk(cls, session, instance_id, image, access, + def _fetch_image_glance_disk(cls, context, session, instance_id, image, image_type): """Fetch the image from Glance @@ -475,6 +452,7 @@ class VMHelper(HelperBase): sr_ref = safe_find_sr(session) glance_client, image_id = nova.image.get_glance_client(image) + glance_client.set_auth_token(getattr(context, 'auth_token', None)) meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) vdi_size = virtual_size @@ -578,136 +556,38 @@ class VMHelper(HelperBase): else: return ImageType.DISK_RAW - # FIXME(sirp): can we unify the ImageService and xenapi_image_service - # abstractions? - if FLAGS.xenapi_image_service == 'glance': - image_type = determine_from_glance() - else: - image_type = determine_from_instance() + image_type = determine_from_glance() log_disk_format(image_type) return image_type @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, - image_type): - """Fetch image from glance based on image type. - - Returns: A single filename if image_type is KERNEL or RAMDISK - A list of dictionaries that describe VDIs, otherwise - """ - if image_type == ImageType.DISK_VHD: - return cls._fetch_image_glance_vhd( - session, instance_id, image, access, image_type) - else: - return cls._fetch_image_glance_disk( - session, instance_id, image, access, image_type) - - @classmethod - def _fetch_image_objectstore(cls, session, instance_id, image, access, - secret, image_type): - """Fetch an image from objectstore. 
- - Returns: A single filename if image_type is KERNEL or RAMDISK - A list of dictionaries that describe VDIs, otherwise - """ - url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port, - image) - LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals()) - if image_type in (ImageType.KERNEL, ImageType.RAMDISK): - fn = 'get_kernel' - else: - fn = 'get_vdi' - args = {} - args['src_url'] = url - args['username'] = access - args['password'] = secret - args['add_partition'] = 'false' - args['raw'] = 'false' - if not image_type in (ImageType.KERNEL, ImageType.RAMDISK): - args['add_partition'] = 'true' - if image_type == ImageType.DISK_RAW: - args['raw'] = 'true' - task = session.async_call_plugin('objectstore', fn, args) - vdi_uuid = None - filename = None - if image_type in (ImageType.KERNEL, ImageType.RAMDISK): - filename = session.wait_for_task(task, instance_id) - else: - vdi_uuid = session.wait_for_task(task, instance_id) - return [dict(vdi_type=ImageType.to_string(image_type), - vdi_uuid=vdi_uuid, - file=filename)] - - @classmethod def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type, os_type): """ Determine whether the VM will use a paravirtualized kernel or if it will use hardware virtualization. - 1. Objectstore (any image type): - We use plugin to figure out whether the VDI uses PV + 1. Glance (VHD): then we use `os_type`, raise if not set - 2. Glance (VHD): then we use `os_type`, raise if not set - - 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is + 2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is available - 4. Glance (DISK): pv is assumed - """ - if FLAGS.xenapi_image_service == 'glance': - # 2, 3, 4: Glance - return cls._determine_is_pv_glance( - session, vdi_ref, disk_image_type, os_type) - else: - # 1. Objecstore - return cls._determine_is_pv_objectstore(session, instance_id, - vdi_ref) - - @classmethod - def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref): - LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) - fn = "is_vdi_pv" - args = {} - args['vdi-ref'] = vdi_ref - task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(task, instance_id) - pv = None - if pv_str.lower() == 'true': - pv = True - elif pv_str.lower() == 'false': - pv = False - LOG.debug(_("PV Kernel in VDI:%s"), pv) - return pv - - @classmethod - def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type, - os_type): - """ - For a Glance image, determine if we need paravirtualization. - - The relevant scenarios are: - 2. Glance (VHD): then we use `os_type`, raise if not set - - 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is - available - - 4. Glance (DISK): pv is assumed + 3. Glance (DISK): pv is assumed """ LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) if disk_image_type == ImageType.DISK_VHD: - # 2. VHD + # 1. VHD if os_type == 'windows': is_pv = False else: is_pv = True elif disk_image_type == ImageType.DISK_RAW: - # 3. RAW + # 2. RAW is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) elif disk_image_type == ImageType.DISK: - # 4. Disk + # 3. 
Disk is_pv = True else: raise exception.Error(_("Unknown image format %(disk_image_type)s") @@ -1215,6 +1095,8 @@ def _prepare_injectables(inst, networks_info): ip_v6 = info['ip6s'][0] if len(info['dns']) > 0: dns = info['dns'][0] + else: + dns = '' interface_info = {'name': 'eth%d' % ifc_num, 'address': ip_v4 and ip_v4['ip'] or '', 'netmask': ip_v4 and ip_v4['netmask'] or '', diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 77efe1bf0..b913e764e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -30,7 +30,7 @@ import sys import time import uuid -from nova import context +from nova import context as nova_context from nova import db from nova import exception from nova import flags @@ -38,7 +38,6 @@ from nova import ipv6 from nova import log as logging from nova import utils -from nova.auth.manager import AuthManager from nova.compute import power_state from nova.virt import driver from nova.virt.xenapi.network_utils import NetworkHelper @@ -110,18 +109,20 @@ class VMOps(object): instance_infos.append(instance_info) return instance_infos - def revert_resize(self, instance): + def revert_migration(self, instance): vm_ref = VMHelper.lookup(self._session, instance.name) self._start(instance, vm_ref) - def finish_resize(self, instance, disk_info, network_info): + def finish_migration(self, context, instance, disk_info, network_info, + resize_instance): vdi_uuid = self.link_disks(instance, disk_info['base_copy'], disk_info['cow']) - vm_ref = self._create_vm(instance, + vm_ref = self._create_vm(context, instance, [dict(vdi_type='os', vdi_uuid=vdi_uuid)], network_info) - self.resize_instance(instance, vdi_uuid) - self._spawn(instance, vm_ref) + if resize_instance: + self.resize_instance(instance, vdi_uuid) + self._start(instance, vm_ref=vm_ref) def _start(self, instance, vm_ref=None): """Power on a VM instance""" @@ -133,20 +134,19 @@ class VMOps(object): LOG.debug(_("Starting instance %s"), instance.name) self._session.call_xenapi('VM.start', vm_ref, False, False) - def _create_disks(self, instance): - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) + def _create_disks(self, context, instance): disk_image_type = VMHelper.determine_disk_image_type(instance) - vdis = VMHelper.fetch_image(self._session, - instance.id, instance.image_ref, user, project, + vdis = VMHelper.fetch_image(context, self._session, + instance.id, instance.image_ref, + instance.user_id, instance.project_id, disk_image_type) return vdis - def spawn(self, instance, network_info): + def spawn(self, context, instance, network_info): vdis = None try: - vdis = self._create_disks(instance) - vm_ref = self._create_vm(instance, vdis, network_info) + vdis = self._create_disks(context, instance) + vm_ref = self._create_vm(context, instance, vdis, network_info) self._spawn(instance, vm_ref) except (self.XenAPI.Failure, OSError, IOError) as spawn_error: LOG.exception(_("instance %s: Failed to spawn"), @@ -156,11 +156,11 @@ class VMOps(object): self._handle_spawn_error(vdis, spawn_error) raise spawn_error - def spawn_rescue(self, instance): + def spawn_rescue(self, context, instance, network_info): """Spawn a rescue instance.""" - self.spawn(instance) + self.spawn(context, instance, network_info) - def _create_vm(self, instance, vdis, network_info): + def _create_vm(self, context, instance, vdis, network_info): """Create VM instance.""" instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) @@ -171,26 
+171,23 @@ class VMOps(object): if not VMHelper.ensure_free_mem(self._session, instance): LOG.exception(_('instance %(instance_name)s: not enough free ' 'memory') % locals()) - db.instance_set_state(context.get_admin_context(), + db.instance_set_state(nova_context.get_admin_context(), instance['id'], power_state.SHUTDOWN) return - user = AuthManager().get_user(instance.user_id) - project = AuthManager().get_project(instance.project_id) - disk_image_type = VMHelper.determine_disk_image_type(instance) kernel = None ramdisk = None try: if instance.kernel_id: - kernel = VMHelper.fetch_image(self._session, instance.id, - instance.kernel_id, user, project, - ImageType.KERNEL)[0] + kernel = VMHelper.fetch_image(context, self._session, + instance.id, instance.kernel_id, instance.user_id, + instance.project_id, ImageType.KERNEL)[0] if instance.ramdisk_id: - ramdisk = VMHelper.fetch_image(self._session, instance.id, - instance.ramdisk_id, user, project, - ImageType.RAMDISK)[0] + ramdisk = VMHelper.fetch_image(context, self._session, + instance.id, instance.kernel_id, instance.user_id, + instance.project_id, ImageType.RAMDISK)[0] # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdis[0]['vdi_uuid']) @@ -209,7 +206,7 @@ class VMOps(object): if instance.vm_mode != vm_mode: # Update database with normalized (or determined) value - db.instance_update(context.get_admin_context(), + db.instance_update(nova_context.get_admin_context(), instance['id'], {'vm_mode': vm_mode}) vm_ref = VMHelper.create_vm(self._session, instance, kernel and kernel.get('file', None) or None, @@ -271,7 +268,7 @@ class VMOps(object): LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.') % locals()) - ctx = context.get_admin_context() + ctx = nova_context.get_admin_context() agent_build = db.agent_build_get_by_triple(ctx, 'xen', instance.os_type, instance.architecture) if agent_build: @@ -415,7 +412,7 @@ class VMOps(object): # if instance_or_vm is an int/long it must be instance id elif isinstance(instance_or_vm, (int, long)): - ctx = context.get_admin_context() + ctx = nova_context.get_admin_context() instance_obj = db.instance_get(ctx, instance_or_vm) instance_name = instance_obj.name else: @@ -440,9 +437,10 @@ class VMOps(object): vm, "start") - def snapshot(self, instance, image_id): + def snapshot(self, context, instance, image_id): """Create snapshot from a running VM instance. + :param context: request context :param instance: instance to be snapshotted :param image_id: id of image to upload to @@ -467,7 +465,7 @@ class VMOps(object): try: template_vm_ref, template_vdi_uuids = self._get_snapshot(instance) # call plugin to ship snapshot off to glance - VMHelper.upload_image( + VMHelper.upload_image(context, self._session, instance, template_vdi_uuids, image_id) finally: if template_vm_ref: @@ -568,18 +566,22 @@ class VMOps(object): return new_cow_uuid def resize_instance(self, instance, vdi_uuid): - """Resize a running instance by changing it's RAM and disk size.""" + """Resize a running instance by changing its RAM and disk size.""" #TODO(mdietz): this will need to be adjusted for swap later #The new disk size must be in bytes - new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024) - instance_name = instance.name - instance_local_gb = instance.local_gb - LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s." 
- " Expanding to %(instance_local_gb)d GB") % locals()) - vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size) - LOG.debug(_("Resize instance %s complete") % (instance.name)) + new_disk_size = instance.local_gb * 1024 * 1024 * 1024 + if new_disk_size > 0: + instance_name = instance.name + instance_local_gb = instance.local_gb + LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance" + "%(instance_name)s. Expanding to %(instance_local_gb)d" + " GB") % locals()) + vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) + # for an instance with no local storage + self._session.call_xenapi('VDI.resize_online', vdi_ref, + str(new_disk_size)) + LOG.debug(_("Resize instance %s complete") % (instance.name)) def reboot(self, instance): """Reboot VM instance.""" @@ -684,7 +686,7 @@ class VMOps(object): # Successful return code from password is '0' if resp_dict['returncode'] != '0': raise RuntimeError(resp_dict['message']) - db.instance_update(context.get_admin_context(), + db.instance_update(nova_context.get_admin_context(), instance['id'], dict(admin_pass=new_pass)) return resp_dict['message'] @@ -741,6 +743,17 @@ class VMOps(object): except self.XenAPI.Failure, exc: LOG.exception(exc) + def _find_rescue_vbd_ref(self, vm_ref, rescue_vm_ref): + """Find and return the rescue VM's vbd_ref. + + We use the second VBD here because swap is first with the root file + system coming in second.""" + vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[1] + vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] + + return VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, 1, + False) + def _shutdown_rescue(self, rescue_vm_ref): """Shutdown a rescue instance.""" self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm_ref) @@ -912,7 +925,7 @@ class VMOps(object): True) self._wait_with_callback(instance.id, task, callback) - def rescue(self, instance, callback): + def rescue(self, context, instance, _callback, network_info): """Rescue the specified instance. - shutdown the instance VM. @@ -930,17 +943,13 @@ class VMOps(object): self._shutdown(instance, vm_ref) self._acquire_bootlock(vm_ref) instance._rescue = True - self.spawn_rescue(instance) + self.spawn_rescue(context, instance, network_info) rescue_vm_ref = VMHelper.lookup(self._session, instance.name) - - vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] - vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] - rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, - vdi_ref, 1, False) + rescue_vbd_ref = self._find_rescue_vbd_ref(vm_ref, rescue_vm_ref) self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) - def unrescue(self, instance, callback): + def unrescue(self, instance, _callback): """Unrescue the specified instance. - unplug the instance VM's disk from the rescue VM. @@ -1022,11 +1031,23 @@ class VMOps(object): # TODO: implement this! 
return 'http://fakeajaxconsole/fake_url' + def host_power_action(self, host, action): + """Reboots or shuts down the host.""" + args = {"action": json.dumps(action)} + methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"} + json_resp = self._call_xenhost(methods[action], args) + resp = json.loads(json_resp) + return resp["power_action"] + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" args = {"enabled": json.dumps(enabled)} - json_resp = self._call_xenhost("set_host_enabled", args) - resp = json.loads(json_resp) + xenapi_resp = self._call_xenhost("set_host_enabled", args) + try: + resp = json.loads(xenapi_resp) + except TypeError as e: + # Already logged; return the message + return xenapi_resp.details[-1] return resp["status"] def _call_xenhost(self, method, arg_dict): @@ -1042,7 +1063,7 @@ class VMOps(object): #args={"params": arg_dict}) ret = self._session.wait_for_task(task, task_id) except self.XenAPI.Failure as e: - ret = None + ret = e LOG.error(_("The call to %(method)s returned an error: %(e)s.") % locals()) return ret diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index cddb8203b..91df80950 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -101,9 +101,6 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') -flags.DEFINE_string('xenapi_image_service', - 'glance', - 'Where to get VM images: glance or objectstore.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, 'The interval used for polling of coalescing vhds.' @@ -187,21 +184,24 @@ class XenAPIConnection(driver.ComputeDriver): def list_instances_detail(self): return self._vmops.list_instances_detail() - def spawn(self, instance, network_info, block_device_mapping=None): + def spawn(self, context, instance, + network_info=None, block_device_info=None): """Create VM instance""" - self._vmops.spawn(instance, network_info) + self._vmops.spawn(context, instance, network_info) - def revert_resize(self, instance): + def revert_migration(self, instance): """Reverts a resize, powering back on the instance""" - self._vmops.revert_resize(instance) + self._vmops.revert_migration(instance) - def finish_resize(self, instance, disk_info, network_info): + def finish_migration(self, context, instance, disk_info, network_info, + resize_instance=False): """Completes a resize, turning on the migrated instance""" - self._vmops.finish_resize(instance, disk_info, network_info) + self._vmops.finish_migration(context, instance, disk_info, + network_info, resize_instance) - def snapshot(self, instance, image_id): + def snapshot(self, context, instance, image_id): """ Create snapshot from a running VM instance """ - self._vmops.snapshot(instance, image_id) + self._vmops.snapshot(context, instance, image_id) def reboot(self, instance, network_info): """Reboot VM instance""" @@ -242,13 +242,13 @@ class XenAPIConnection(driver.ComputeDriver): """resume the specified instance""" self._vmops.resume(instance, callback) - def rescue(self, instance, callback, network_info): + def rescue(self, context, instance, _callback, network_info): """Rescue the specified instance""" - self._vmops.rescue(instance, callback) + self._vmops.rescue(context, instance, _callback, network_info) - def unrescue(self, instance, callback, network_info): + def unrescue(self, instance, _callback, network_info): """Unrescue the specified instance""" - 
self._vmops.unrescue(instance, callback) + self._vmops.unrescue(instance, _callback) def poll_rescued_instances(self, timeout): """Poll for rescued instances""" @@ -332,6 +332,19 @@ class XenAPIConnection(driver.ComputeDriver): True, run the update first.""" return self.HostState.get_host_stats(refresh=refresh) + def host_power_action(self, host, action): + """The only valid values for 'action' on XenServer are 'reboot' or + 'shutdown', even though the API also accepts 'startup'. As this is + not technically possible on XenServer, since the host is the same + physical machine as the hypervisor, if this is requested, we need to + raise an exception. + """ + if action in ("reboot", "shutdown"): + return self._vmops.host_power_action(host, action) + else: + msg = _("Host startup on XenServer is not supported.") + raise NotImplementedError(msg) + def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._vmops.set_host_enabled(host, enabled) @@ -394,11 +407,10 @@ class XenAPISession(object): try: name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) + # Ensure action is never > 255 + action = dict(action=name[:255], error=None) if id: - action = dict( - instance_id=int(id), - action=name[0:255], # Ensure action is never > 255 - error=None) + action["instance_id"] = int(id) if status == "pending": return elif status == "success": @@ -441,7 +453,7 @@ class XenAPISession(object): params = None try: params = eval(exc.details[3]) - except: + except Exception: raise exc raise self.XenAPI.Failure(params) else: diff --git a/nova/vnc/proxy.py b/nova/vnc/proxy.py index c4603803b..2e3e38ca9 100644 --- a/nova/vnc/proxy.py +++ b/nova/vnc/proxy.py @@ -60,7 +60,7 @@ class WebsocketVNCProxy(object): break d = base64.b64encode(d) dest.send(d) - except: + except Exception: source.close() dest.close() @@ -72,7 +72,7 @@ class WebsocketVNCProxy(object): break d = base64.b64decode(d) dest.sendall(d) - except: + except Exception: source.close() dest.close() diff --git a/nova/wsgi.py b/nova/wsgi.py index eae3afcb4..c8ddb97d7 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -274,6 +274,18 @@ class Middleware(Application): return self.process_response(response) +class InjectContext(Middleware): + """Add a 'nova.context' to WSGI environ.""" + def __init__(self, context, *args, **kwargs): + self.context = context + super(InjectContext, self).__init__(*args, **kwargs) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, req): + req.environ['nova.context'] = self.context + return self.application + + class Debug(Middleware): """Helper class for debugging a WSGI application. diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index fbe080b22..a06312890 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -67,12 +67,17 @@ def _copy_kernel_vdi(dest, copy_args): def _download_tarball(sr_path, staging_path, image_id, glance_host, - glance_port): + glance_port, auth_token): """Download the tarball image from Glance and extract it into the staging area. 
""" + # Build request headers + headers = {} + if auth_token: + headers['x-auth-token'] = auth_token + conn = httplib.HTTPConnection(glance_host, glance_port) - conn.request('GET', '/v1/images/%s' % image_id) + conn.request('GET', '/v1/images/%s' % image_id, headers=headers) resp = conn.getresponse() if resp.status == httplib.NOT_FOUND: raise Exception("Image '%s' not found in Glance" % image_id) @@ -236,12 +241,29 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids): os.link(source, link_name) -def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): +def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type, + auth_token): """ Create a tarball of the image and then stream that into Glance using chunked-transfer-encoded HTTP. """ conn = httplib.HTTPConnection(glance_host, glance_port) + + # NOTE(dprince): We need to resend any existing Glance meta/property + # headers so they are preserved in Glance. We obtain them here with a + # HEAD request. + conn.request('HEAD', '/v1/images/%s' % image_id) + resp = conn.getresponse() + if resp.status != httplib.OK: + raise Exception("Unexpected response from Glance %i" % resp.status) + headers = {} + for header, value in resp.getheaders(): + if header.lower().startswith("x-image-meta-property-"): + headers[header.lower()] = value + + # Toss body so connection state-machine is ready for next request/response + resp.read() + # NOTE(sirp): httplib under python2.4 won't accept a file-like object # to request conn.putrequest('PUT', '/v1/images/%s' % image_id) @@ -254,7 +276,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): # 2. We're currently uploading a vanilla tarball. In order to be OVF/OVA # compliant, we'll need to embed a minimal OVF manifest as the first # file. - headers = { + ovf_headers = { 'content-type': 'application/octet-stream', 'transfer-encoding': 'chunked', 'x-image-meta-is-public': 'True', @@ -263,6 +285,12 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type): 'x-image-meta-container-format': 'ovf', 'x-image-meta-property-os-type': os_type} + # If we have an auth_token, set an x-auth-token header + if auth_token: + ovf_headers['x-auth-token'] = auth_token + + headers.update(ovf_headers) + for header, value in headers.iteritems(): conn.putheader(header, value) conn.endheaders() @@ -364,11 +392,12 @@ def download_vhd(session, args): glance_port = params["glance_port"] uuid_stack = params["uuid_stack"] sr_path = params["sr_path"] + auth_token = params["auth_token"] staging_path = _make_staging_area(sr_path) try: _download_tarball(sr_path, staging_path, image_id, glance_host, - glance_port) + glance_port, auth_token) # Right now, it's easier to return a single string via XenAPI, # so we'll json encode the list of VHDs. 
return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack)) @@ -386,12 +415,13 @@ def upload_vhd(session, args): glance_port = params["glance_port"] sr_path = params["sr_path"] os_type = params["os_type"] + auth_token = params["auth_token"] staging_path = _make_staging_area(sr_path) try: _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids) _upload_tarball(staging_path, image_id, glance_host, glance_port, - os_type) + os_type, auth_token) finally: _cleanup_staging_area(staging_path) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost index 292bbce12..cd9694ce1 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost @@ -39,6 +39,7 @@ import pluginlib_nova as pluginlib pluginlib.configure_logging("xenhost") host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)") +config_file_path = "/usr/etc/xenhost.conf" def jsonify(fnc): @@ -103,6 +104,104 @@ def set_host_enabled(self, arg_dict): return {"status": status} +def _write_config_dict(dct): + conf_file = file(config_file_path, "w") + json.dump(dct, conf_file) + conf_file.close() + + +def _get_config_dict(): + """Returns a dict containing the key/values in the config file. + If the file doesn't exist, it is created, and an empty dict + is returned. + """ + try: + conf_file = file(config_file_path) + config_dct = json.load(conf_file) + conf_file.close() + except IOError: + # File doesn't exist + config_dct = {} + # Create the file + _write_config_dict(config_dct) + return config_dct + + +@jsonify +def get_config(self, arg_dict): + """Return the value stored for the specified key, or None if no match.""" + conf = _get_config_dict() + params = arg_dict["params"] + try: + dct = json.loads(params) + except Exception, e: + dct = params + key = dct["key"] + ret = conf.get(key) + if ret is None: + # Can't jsonify None + return "None" + return ret + + +@jsonify +def set_config(self, arg_dict): + """Write the specified key/value pair, overwriting any existing value.""" + conf = _get_config_dict() + params = arg_dict["params"] + try: + dct = json.loads(params) + except Exception, e: + dct = params + key = dct["key"] + val = dct["value"] + if val is None: + # Delete the key, if present + conf.pop(key, None) + else: + conf.update({key: val}) + _write_config_dict(conf) + + +def _power_action(action): + host_uuid = _get_host_uuid() + # Host must be disabled first + result = _run_command("xe host-disable") + if result: + raise pluginlib.PluginError(result) + # All running VMs must be shutdown + result = _run_command("xe vm-shutdown --multiple power-state=running") + if result: + raise pluginlib.PluginError(result) + cmds = {"reboot": "xe host-reboot", "startup": "xe host-power-on", + "shutdown": "xe host-shutdown"} + result = _run_command(cmds[action]) + # Should be empty string + if result: + raise pluginlib.PluginError(result) + return {"power_action": action} + + +@jsonify +def host_reboot(self, arg_dict): + """Reboots the host.""" + return _power_action("reboot") + + +@jsonify +def host_shutdown(self, arg_dict): + """Reboots the host.""" + return _power_action("shutdown") + + +@jsonify +def host_start(self, arg_dict): + """Starts the host. Currently not feasible, since the host + runs on the same machine as Xen. 
+ """ + return _power_action("startup") + + @jsonify def host_data(self, arg_dict): """Runs the commands on the xenstore host to return the current status @@ -115,6 +214,9 @@ def host_data(self, arg_dict): # We have the raw dict of values. Extract those that we need, # and convert the data types as needed. ret_dict = cleanup(parsed_data) + # Add any config settings + config = _get_config_dict() + ret_dict.update(config) return ret_dict @@ -217,4 +319,9 @@ def cleanup(dct): if __name__ == "__main__": XenAPIPlugin.dispatch( {"host_data": host_data, - "set_host_enabled": set_host_enabled}) + "set_host_enabled": set_host_enabled, + "host_shutdown": host_shutdown, + "host_reboot": host_reboot, + "host_start": host_start, + "get_config": get_config, + "set_config": set_config}) @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2265,10 +2210,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -131,33 +131,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "PID-Datei %s existiert nicht. 
Läuft der Daemon nicht?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Kein passender Prozess gefunden" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Bedient %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Alle vorhandenen FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "%s wird gestartet" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1785,34 +1758,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2270,10 +2215,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/en_AU.po b/po/en_AU.po index e53f9fc07..3fa62c006 100644 --- a/po/en_AU.po +++ b/po/en_AU.po @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/en_GB.po b/po/en_GB.po index 601f6170b..b204c93a1 100644 --- a/po/en_GB.po +++ b/po/en_GB.po @@ -130,33 +130,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Wrong number of arguments." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s does not exist. Daemon not running?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "No such process" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Serving %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Full set of FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Starting %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1803,34 +1776,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2288,10 +2233,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -130,33 +130,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Cantidad de argumentos incorrecta" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "El \"pidfile\" %s no existe. 
Quizás el servicio no este corriendo.\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "No existe el proceso" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Sirviendo %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de opciones (FLAGS):" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Iniciando %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1819,34 +1792,6 @@ msgstr "" msgid "Got exception: %s" msgstr "Obtenida excepción %s" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "actualizando %s..." - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "error inesperado durante la actualización" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "excepción inexperada al obtener la conexión" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "Encontrada interfaz: %s" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2309,10 +2254,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -133,35 +133,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Nombre d'arguments incorrect." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" -"Le fichier pid %s n'existe pas. Est-ce que le processus est en cours " -"d'exécution ?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Aucun processus de ce type" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "En train de servir %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Ensemble de propriétés complet :" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Démarrage de %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1865,34 +1836,6 @@ msgstr "Tâche [%(name)s] %(task)s état : %(status)s %(error_info)s" msgid "Got exception: %s" msgstr "Reçu exception : %s" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "mise à jour %s..." 
- -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "erreur inopinée pendant la ise à jour" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "Ne peut pas récupérer blockstats pour \"%(disk)s\" sur \"%(iid)s\"" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "Ne peut pas récupérer ifstats pour \"%(interface)s\" sur \"%(iid)s\"" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "erreur inopinée pendant la connexion" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "Instance trouvée : %s" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2373,10 +2316,6 @@ msgstr "Démarrage %(arg0)s sur %(host)s:%(port)s" msgid "You must implement __call__" msgstr "Vous devez implémenter __call__" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "Démarrage du superviseur d'instance" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "Allocation IP" @@ -134,34 +134,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Numero errato di argomenti" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" -"Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Nessun processo trovato" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Servire %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Insieme di FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Avvio di %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1791,34 +1763,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2278,10 +2222,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -130,33 +130,6 @@ msgstr "例外: compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "例外: compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "引数の数が異なります。" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. 
Daemon not running?\n" -msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "そのようなプロセスはありません" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "%s サービスの開始" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "FLAGSの一覧:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "%s を起動中" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1808,34 +1781,6 @@ msgstr "タスク [%(name)s] %(task)s 状態: %(status)s %(error_info)s" msgid "Got exception: %s" msgstr "例外 %s が発生しました。" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "%s の情報の更新…" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "更新の最中に予期しないエラーが発生しました。" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "\"%(iid)s\" 上の \"%(disk)s\" 用のブロック統計(blockstats)が取得できません" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "\"%(iid)s\" 上の %(interface)s\" 用インターフェース統計(ifstats)が取得できません" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "接続に際し予期しないエラーが発生しました。" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "インスタンス %s が見つかりました。" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2311,10 +2256,6 @@ msgstr "%(host)s:%(port)s 上で %(arg0)s を開始しています" msgid "You must implement __call__" msgstr "__call__ を実装しなければなりません" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "インスタンスモニタを開始しています" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "IP アドレスをリースしました" diff --git a/po/nova.pot b/po/nova.pot index 58140302d..e180ed750 100644 --- a/po/nova.pot +++ b/po/nova.pot @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/pt_BR.po b/po/pt_BR.po index f067a69e0..b3aefce44 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -126,34 +126,6 @@ msgstr "compute.api::suspend %s" msgid "compute.api::resume %s" msgstr "compute.api::resume %s" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Número errado de argumentos." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" -"Arquivo do id do processo (pidfile) %s não existe. O Daemon está parado?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "Processo inexistente" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Servindo %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de FLAGS:" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Iniciando %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1804,34 +1776,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2290,10 +2234,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "Неверное число аргументов." - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s не обнаружен. 
Демон не запущен?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Запускается %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1779,34 +1752,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "обновление %s..." - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "неожиданная ошибка во время обновления" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2264,10 +2209,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2265,10 +2210,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." 
-msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "Обслуговування %s" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "Запускається %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." -msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/zh_CN.po b/po/zh_CN.po index c3d292a93..d0ddcd2f7 100644 --- a/po/zh_CN.po +++ b/po/zh_CN.po @@ -17,11 +17,6 @@ msgstr "" "X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n" "X-Generator: Launchpad (build 13405)\n" -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "启动 %s 中" - #: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 #: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 #: ../nova/scheduler/simple.py:122 @@ -135,28 +130,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "错误参数个数。" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s 不存在,守护进程是否运行?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "没有该进程" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "正在为 %s 服务" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "FLAGS全集:" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1785,34 +1758,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2270,10 +2215,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" diff --git a/po/zh_TW.po b/po/zh_TW.po index ad14c0e32..896e69618 100644 --- a/po/zh_TW.po +++ b/po/zh_TW.po @@ -125,33 +125,6 @@ msgstr "" msgid "compute.api::resume %s" msgstr "" -#: ../nova/twistd.py:157 -msgid "Wrong number of arguments." -msgstr "" - -#: ../nova/twistd.py:209 -#, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s 不存在. Daemon未啟動?\n" - -#: ../nova/twistd.py:221 -msgid "No such process" -msgstr "沒有此一程序" - -#: ../nova/twistd.py:230 ../nova/service.py:224 -#, python-format -msgid "Serving %s" -msgstr "" - -#: ../nova/twistd.py:262 ../nova/service.py:225 -msgid "Full set of FLAGS:" -msgstr "" - -#: ../nova/twistd.py:266 -#, python-format -msgid "Starting %s" -msgstr "正在啟動 %s" - #: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 #: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 #: ../nova/api/ec2/__init__.py:317 @@ -1778,34 +1751,6 @@ msgstr "" msgid "Got exception: %s" msgstr "" -#: ../nova/compute/monitor.py:259 -#, python-format -msgid "updating %s..." 
-msgstr "" - -#: ../nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "" - -#: ../nova/compute/monitor.py:356 -#, python-format -msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:379 -#, python-format -msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" -msgstr "" - -#: ../nova/compute/monitor.py:414 -msgid "unexpected exception getting connection" -msgstr "" - -#: ../nova/compute/monitor.py:429 -#, python-format -msgid "Found instance: %s" -msgstr "" - #: ../nova/volume/san.py:67 #, python-format msgid "Could not find iSCSI export for volume %s" @@ -2263,10 +2208,6 @@ msgstr "" msgid "You must implement __call__" msgstr "" -#: ../bin/nova-instancemonitor.py:55 -msgid "Starting instance monitor" -msgstr "" - #: ../bin/nova-dhcpbridge.py:58 msgid "leasing ip" msgstr "" @@ -124,7 +124,6 @@ setup(name='nova', 'bin/nova-dhcpbridge', 'bin/nova-direct-api', 'bin/nova-import-canonical-imagestore', - 'bin/nova-instancemonitor', 'bin/nova-logspool', 'bin/nova-manage', 'bin/nova-network', diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py index 60086f065..8c8fa35b8 100644 --- a/smoketests/test_netadmin.py +++ b/smoketests/test_netadmin.py @@ -109,13 +109,17 @@ class SecurityGroupTests(base.UserSmokeTestCase): def __public_instance_is_accessible(self): id_url = "latest/meta-data/instance-id" - options = "-s --max-time 1" + options = "-f -s --max-time 1" command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url) - instance_id = commands.getoutput(command).strip() + status, output = commands.getstatusoutput(command) + instance_id = output.strip() + if status > 0: + return False if not instance_id: return False if instance_id != self.data['instance'].id: - raise Exception("Wrong instance id") + raise Exception("Wrong instance id. Expected: %s, Got: %s" % + (self.data['instance'].id, instance_id)) return True def test_001_can_create_security_group(self): diff --git a/tools/eventlet-patch b/tools/eventlet-patch deleted file mode 100644 index c87c5f279..000000000 --- a/tools/eventlet-patch +++ /dev/null @@ -1,24 +0,0 @@ -# HG changeset patch -# User Soren Hansen <soren@linux2go.dk> -# Date 1297678255 -3600 -# Node ID 4c846d555010bb5a91ab4da78dfe596451313742 -# Parent 5b7e9946c79f005c028eb63207cf5eb7bb21d1c3 -Don't attempt to wrap GreenPipes in GreenPipe - -If the os module is monkeypatched, Python's standard subprocess module -will return greenio.GreenPipe instances for Popen objects' stdin, stdout, -and stderr attributes. However, eventlet.green.subprocess tries to wrap -these attributes in another greenio.GreenPipe, which GreenPipe refuses. - -diff -r 5b7e9946c79f -r 4c846d555010 eventlet/green/subprocess.py ---- a/eventlet/green/subprocess.py Sat Feb 05 13:05:05 2011 -0800 -+++ b/eventlet/green/subprocess.py Mon Feb 14 11:10:55 2011 +0100 -@@ -27,7 +27,7 @@ - # eventlet.processes.Process.run() method. 
- for attr in "stdin", "stdout", "stderr": - pipe = getattr(self, attr) -- if pipe is not None: -+ if pipe is not None and not type(pipe) == greenio.GreenPipe: - wrapped_pipe = greenio.GreenPipe(pipe, pipe.mode, bufsize) - setattr(self, attr, wrapped_pipe) - __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__ diff --git a/tools/install_venv.py b/tools/install_venv.py index f4b6583ed..3c2f6979f 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -31,7 +31,6 @@ import sys ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') -TWISTED_NOVA = 'http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) @@ -106,20 +105,12 @@ def install_dependencies(venv=VENV): 'greenlet'], redirect_output=False) run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES], redirect_output=False) - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, - TWISTED_NOVA], redirect_output=False) # Tell the virtual env how to "import nova" pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", "nova.pth") f = open(pthfile, 'w') f.write("%s\n" % ROOT) - # Patch eventlet (see FAQ # 1485) - patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch') - patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", - "eventlet", "green", "subprocess.py") - patch_cmd = "patch %s %s" % (patchfile, patchsrc) - os.system(patch_cmd) def print_help(): diff --git a/tools/pip-requires b/tools/pip-requires index dec93c351..60b502ffd 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -9,7 +9,8 @@ boto==1.9b carrot==0.10.5 eventlet lockfile==0.8 -python-novaclient==2.5.7 +lxml==2.3 +python-novaclient==2.6.0 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 @@ -20,13 +21,13 @@ mox==0.5.3 greenlet==0.3.1 nose bzr -Twisted>=10.1.0 PasteDeploy paste sqlalchemy-migrate netaddr sphinx glance +xattr>=0.6.0 nova-adminclient suds==0.4 coverage |
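The glance plugin change above threads an auth token through both download_vhd and upload_vhd and, on upload, first issues a HEAD request so that any existing x-image-meta-property-* headers are re-sent and preserved in Glance before the chunked PUT. The following is a minimal, self-contained sketch of that request flow, not the plugin itself: the host, port, image id, token, and tarball bytes are hypothetical, and Python 3's http.client stands in for the Python 2 httplib used by the plugin.

    # Sketch only: preserve existing Glance image properties across a
    # re-upload and pass an auth token when one is given. All endpoint
    # values below are illustrative.
    import http.client

    GLANCE_HOST = "glance.example.com"   # hypothetical
    GLANCE_PORT = 9292                   # hypothetical
    IMAGE_ID = "some-image-id"           # hypothetical
    AUTH_TOKEN = None                    # set when auth is enabled


    def fetch_property_headers(host, port, image_id, auth_token=None):
        """HEAD the image and keep only its x-image-meta-property-* headers."""
        conn = http.client.HTTPConnection(host, port)
        headers = {"x-auth-token": auth_token} if auth_token else {}
        conn.request("HEAD", "/v1/images/%s" % image_id, headers=headers)
        resp = conn.getresponse()
        if resp.status != http.client.OK:
            raise Exception("Unexpected response from Glance %i" % resp.status)
        props = {}
        for header, value in resp.getheaders():
            if header.lower().startswith("x-image-meta-property-"):
                props[header.lower()] = value
        resp.read()  # drain the body so the connection can be reused
        conn.close()
        return props


    def upload_chunked(host, port, image_id, chunks, extra_headers):
        """PUT the image back using chunked transfer encoding."""
        conn = http.client.HTTPConnection(host, port)
        conn.putrequest("PUT", "/v1/images/%s" % image_id)
        headers = {
            "content-type": "application/octet-stream",
            "transfer-encoding": "chunked",
            "x-image-meta-is-public": "True",
            "x-image-meta-status": "queued",
            "x-image-meta-disk-format": "vhd",
            "x-image-meta-container-format": "ovf",
        }
        headers.update(extra_headers)
        for header, value in headers.items():
            conn.putheader(header, value)
        conn.endheaders()
        for chunk in chunks:                 # each chunk is a bytes object
            conn.send(b"%x\r\n%s\r\n" % (len(chunk), chunk))
        conn.send(b"0\r\n\r\n")              # chunked-encoding terminator
        resp = conn.getresponse()
        resp.read()
        conn.close()
        return resp.status


    if __name__ == "__main__":
        props = fetch_property_headers(GLANCE_HOST, GLANCE_PORT, IMAGE_ID,
                                       AUTH_TOKEN)
        if AUTH_TOKEN:
            props["x-auth-token"] = AUTH_TOKEN
        print(upload_chunked(GLANCE_HOST, GLANCE_PORT, IMAGE_ID,
                             [b"fake tarball bytes"], props))

As in the plugin, the preserved property headers are merged with the fixed OVF upload headers before the chunked PUT, so re-uploading a snapshot does not silently drop custom image properties.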
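The xenhost plugin additions above persist arbitrary key/value settings in a JSON file at config_file_path, expose them through get_config/set_config, and fold them into the host_data output. A minimal sketch of that persistence pattern follows, assuming a hypothetical file location and ordinary Python 3 file handling in place of the plugin's Python 2 file() calls.

    # Sketch of the key/value persistence used by the xenhost plugin's
    # get_config/set_config calls. The path is illustrative; the plugin
    # writes to its own config_file_path on the XenServer host.
    import json

    CONFIG_FILE_PATH = "/tmp/xenhost-example.conf"  # hypothetical location


    def _write_config_dict(dct):
        with open(CONFIG_FILE_PATH, "w") as conf_file:
            json.dump(dct, conf_file)


    def _get_config_dict():
        """Return the stored dict; create an empty file if none exists yet."""
        try:
            with open(CONFIG_FILE_PATH) as conf_file:
                return json.load(conf_file)
        except IOError:
            _write_config_dict({})
            return {}


    def get_config(key):
        """Return the stored value, or the string "None" when there is no
        match, mirroring the plugin's "Can't jsonify None" workaround."""
        ret = _get_config_dict().get(key)
        return "None" if ret is None else ret


    def set_config(key, value):
        """Store key/value; a None value deletes the key if present."""
        conf = _get_config_dict()
        if value is None:
            conf.pop(key, None)
        else:
            conf[key] = value
        _write_config_dict(conf)


    if __name__ == "__main__":
        set_config("iscsi_iqn", "iqn.2011-08.example:node1")  # hypothetical key
        print(get_config("iscsi_iqn"))
        set_config("iscsi_iqn", None)   # deletes the key
        print(get_config("iscsi_iqn"))  # -> "None"

Returning the string "None" rather than a bare None matches the plugin, which cannot hand None back through the jsonify/XenAPI plugin layer.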

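nova/wsgi.py gains an InjectContext middleware that stores a context object in the WSGI environ under 'nova.context' before handing the request to the wrapped application. Below is a plain-WSGI sketch of the same idea, with hypothetical wiring and without Nova's Middleware base class or the webob decorator used in the real code.

    # Plain-WSGI sketch: wrap an application and make a fixed context object
    # available to it via the environ. Names and the demo server are
    # illustrative only.
    from wsgiref.simple_server import make_server


    class InjectContext(object):
        """Add a 'nova.context' key to the WSGI environ before calling the app."""

        def __init__(self, application, context):
            self.application = application
            self.context = context

        def __call__(self, environ, start_response):
            environ['nova.context'] = self.context
            return self.application(environ, start_response)


    def app(environ, start_response):
        """Toy downstream app that reads the injected context."""
        body = ("context is %r\n" % environ.get('nova.context')).encode("utf-8")
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [body]


    if __name__ == "__main__":
        wrapped = InjectContext(app, context={"user": "demo", "is_admin": False})
        make_server("127.0.0.1", 8080, wrapped).serve_forever()

In the webob-decorated original, returning self.application from __call__ is what forwards the request downstream; the sketch simply makes that delegation explicit.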