author     Ed Leafe <ed@leafe.com>    2011-08-03 16:39:48 +0000
committer  Ed Leafe <ed@leafe.com>    2011-08-03 16:39:48 +0000
commit     3f7c71fd38a67e6983de0bb268e5c65abc5753f4 (patch)
tree       c4c930b16093cd3e32d83676a50826713516c431
parent     1753da4d586f896f449828879e4361241289e376 (diff)
parent     e2770a4558c95aa4b6e276ebe18dc580a82e6d67 (diff)
Merged trunk
-rw-r--r--  Authors | 2
-rwxr-xr-x  bin/nova-instancemonitor | 59
-rw-r--r--  bin/nova-logspool | 1
-rwxr-xr-x  bin/nova-manage | 21
-rwxr-xr-x  bin/nova-objectstore | 2
-rwxr-xr-x  contrib/nova.sh | 2
-rw-r--r--  doc/source/api/autoindex.rst | 3
-rw-r--r--  doc/source/api/nova..compute.monitor.rst | 6
-rw-r--r--  doc/source/api/nova..tests.test_twistd.rst | 6
-rw-r--r--  doc/source/api/nova..twistd.rst | 6
-rw-r--r--  doc/source/code.rst | 3
-rw-r--r--  doc/source/devref/architecture.rst | 2
-rw-r--r--  doc/source/devref/compute.rst | 13
-rw-r--r--  doc/source/devref/development.environment.rst | 2
-rw-r--r--  doc/source/devref/nova.rst | 20
-rw-r--r--  nova/api/direct.py | 3
-rw-r--r--  nova/api/ec2/__init__.py | 32
-rw-r--r--  nova/api/openstack/__init__.py | 6
-rw-r--r--  nova/api/openstack/auth.py | 28
-rw-r--r--  nova/api/openstack/common.py | 82
-rw-r--r--  nova/api/openstack/contrib/floating_ips.py | 4
-rw-r--r--  nova/api/openstack/create_instance_helper.py | 79
-rw-r--r--  nova/api/openstack/image_metadata.py | 88
-rw-r--r--  nova/api/openstack/images.py | 133
-rw-r--r--  nova/api/openstack/servers.py | 344
-rw-r--r--  nova/api/openstack/versions.py | 270
-rw-r--r--  nova/api/openstack/views/servers.py | 5
-rw-r--r--  nova/api/openstack/views/versions.py | 56
-rw-r--r--  nova/api/openstack/wsgi.py | 12
-rw-r--r--  nova/auth/dbdriver.py | 2
-rw-r--r--  nova/auth/manager.py | 14
-rw-r--r--  nova/cloudpipe/pipelib.py | 18
-rw-r--r--  nova/compute/api.py | 11
-rw-r--r--  nova/compute/instance_types.py | 9
-rw-r--r--  nova/compute/manager.py | 32
-rw-r--r--  nova/compute/monitor.py | 435
-rw-r--r--  nova/context.py | 95
-rw-r--r--  nova/db/sqlalchemy/api.py | 24
-rw-r--r--  nova/db/sqlalchemy/models.py | 16
-rw-r--r--  nova/exception.py | 8
-rw-r--r--  nova/flags.py | 2
-rw-r--r--  nova/image/glance.py | 11
-rw-r--r--  nova/image/s3.py | 14
-rw-r--r--  nova/log.py | 5
-rw-r--r--  nova/network/manager.py | 64
-rw-r--r--  nova/notifier/api.py | 4
-rw-r--r--  nova/rpc/__init__.py | 66
-rw-r--r--  nova/rpc/amqp.py (renamed from nova/rpc.py) | 23
-rw-r--r--  nova/rpc/common.py | 23
-rw-r--r--  nova/scheduler/least_cost.py | 1
-rw-r--r--  nova/scheduler/zone_aware_scheduler.py | 2
-rw-r--r--  nova/service.py | 28
-rw-r--r--  nova/test.py | 16
-rw-r--r--  nova/tests/api/openstack/contrib/test_floating_ips.py | 10
-rw-r--r--  nova/tests/api/openstack/contrib/test_multinic_xs.py | 8
-rw-r--r--  nova/tests/api/openstack/fakes.py | 43
-rw-r--r--  nova/tests/api/openstack/test_accounts.py | 10
-rw-r--r--  nova/tests/api/openstack/test_adminapi.py | 17
-rw-r--r--  nova/tests/api/openstack/test_auth.py | 42
-rw-r--r--  nova/tests/api/openstack/test_common.py | 201
-rw-r--r--  nova/tests/api/openstack/test_extensions.py | 84
-rw-r--r--  nova/tests/api/openstack/test_flavors.py | 5
-rw-r--r--  nova/tests/api/openstack/test_flavors_extra_specs.py (renamed from nova/tests/api/openstack/extensions/test_flavors_extra_specs.py) | 66
-rw-r--r--  nova/tests/api/openstack/test_image_metadata.py | 215
-rw-r--r--  nova/tests/api/openstack/test_images.py | 283
-rw-r--r--  nova/tests/api/openstack/test_limits.py | 14
-rw-r--r--  nova/tests/api/openstack/test_server_metadata.py | 14
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 1243
-rw-r--r--  nova/tests/api/openstack/test_shared_ip_groups.py | 13
-rw-r--r--  nova/tests/api/openstack/test_users.py | 9
-rw-r--r--  nova/tests/api/openstack/test_versions.py | 862
-rw-r--r--  nova/tests/api/openstack/test_zones.py | 25
-rw-r--r--  nova/tests/glance/stubs.py | 5
-rw-r--r--  nova/tests/hyperv_unittest.py | 9
-rw-r--r--  nova/tests/image/test_glance.py | 3
-rw-r--r--  nova/tests/integrated/test_servers.py | 1
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 22
-rw-r--r--  nova/tests/scheduler/test_zone_aware_scheduler.py | 18
-rw-r--r--  nova/tests/test_access.py | 21
-rw-r--r--  nova/tests/test_adminapi.py | 18
-rw-r--r--  nova/tests/test_api.py | 76
-rw-r--r--  nova/tests/test_auth.py | 19
-rw-r--r--  nova/tests/test_cloud.py | 56
-rw-r--r--  nova/tests/test_compute.py | 51
-rw-r--r--  nova/tests/test_console.py | 19
-rw-r--r--  nova/tests/test_db_api.py | 34
-rw-r--r--  nova/tests/test_libvirt.py | 71
-rw-r--r--  nova/tests/test_objectstore.py | 27
-rw-r--r--  nova/tests/test_quota.py | 62
-rw-r--r--  nova/tests/test_rpc.py | 61
-rw-r--r--  nova/tests/test_rpc_amqp.py | 68
-rw-r--r--  nova/tests/test_service.py | 170
-rw-r--r--  nova/tests/test_test.py | 13
-rw-r--r--  nova/tests/test_twistd.py | 53
-rw-r--r--  nova/tests/test_utils.py | 78
-rw-r--r--  nova/tests/test_vmwareapi.py | 24
-rw-r--r--  nova/tests/test_xenapi.py | 186
-rw-r--r--  nova/tests/xenapi/stubs.py | 6
-rw-r--r--  nova/twistd.py | 267
-rw-r--r--  nova/utils.py | 97
-rw-r--r--  nova/virt/driver.py | 44
-rw-r--r--  nova/virt/fake.py | 19
-rw-r--r--  nova/virt/hyperv.py | 9
-rw-r--r--  nova/virt/images.py | 6
-rw-r--r--  nova/virt/libvirt/connection.py | 64
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 12
-rw-r--r--  nova/virt/vmwareapi_conn.py | 9
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 176
-rw-r--r--  nova/virt/xenapi/vmops.py | 94
-rw-r--r--  nova/virt/xenapi_conn.py | 24
-rw-r--r--  nova/wsgi.py | 12
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 22
-rw-r--r--  po/ast.po | 59
-rw-r--r--  po/cs.po | 59
-rw-r--r--  po/da.po | 59
-rw-r--r--  po/de.po | 59
-rw-r--r--  po/en_AU.po | 59
-rw-r--r--  po/en_GB.po | 59
-rw-r--r--  po/es.po | 59
-rw-r--r--  po/fr.po | 61
-rw-r--r--  po/it.po | 60
-rw-r--r--  po/ja.po | 59
-rw-r--r--  po/nova.pot | 59
-rw-r--r--  po/pt_BR.po | 60
-rw-r--r--  po/ru.po | 59
-rw-r--r--  po/tl.po | 59
-rw-r--r--  po/uk.po | 59
-rw-r--r--  po/zh_CN.po | 59
-rw-r--r--  po/zh_TW.po | 59
-rw-r--r--  setup.py | 1
-rw-r--r--  tools/eventlet-patch | 24
-rw-r--r--  tools/install_venv.py | 9
-rw-r--r--  tools/pip-requires | 4
133 files changed, 4122 insertions, 4326 deletions
diff --git a/Authors b/Authors
index 1a07946bd..0e5931462 100644
--- a/Authors
+++ b/Authors
@@ -67,6 +67,7 @@ Lvov Maxim <usrleon@gmail.com>
Mark Washenberger <mark.washenberger@rackspace.com>
Masanori Itoh <itoumsn@nttdata.co.jp>
Matt Dietz <matt.dietz@rackspace.com>
+Matthew Hooker <matt@cloudscaling.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Mike Scherbakov <mihgen@gmail.com>
Mohammed Naser <mnaser@vexxhost.com>
@@ -105,3 +106,4 @@ Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Yuriy Taraday <yorik.sar@gmail.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
+Zed Shaw <zedshaw@zedshaw.com>
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
deleted file mode 100755
index b9d4e49d7..000000000
--- a/bin/nova-instancemonitor
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
- Daemon for Nova RRD based instance resource monitoring.
-"""
-
-import gettext
-import os
-import sys
-from twisted.application import service
-
-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
- os.pardir,
- os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
- sys.path.insert(0, possible_topdir)
-
-gettext.install('nova', unicode=1)
-
-from nova import log as logging
-from nova import utils
-from nova import twistd
-from nova.compute import monitor
-
-LOG = logging.getLogger('nova.instancemonitor')
-
-
-if __name__ == '__main__':
- utils.default_flagfile()
- twistd.serve(__file__)
-
-if __name__ == '__builtin__':
- LOG.warn(_('Starting instance monitor'))
- # pylint: disable=C0103
- monitor = monitor.InstanceMonitor()
-
- # This is the parent service that twistd will be looking for when it
- # parses this file, return it so that we can get it into globals below
- application = service.Application('nova-instancemonitor')
- monitor.setServiceParent(application)
diff --git a/bin/nova-logspool b/bin/nova-logspool
index 097459b12..a876f4c71 100644
--- a/bin/nova-logspool
+++ b/bin/nova-logspool
@@ -81,7 +81,6 @@ class LogReader(object):
if level == 'ERROR':
self.handle_logged_error(line)
elif level == '[-]' and self.last_error:
- # twisted stack trace line
clean_line = " ".join(line.split(" ")[6:])
self.last_error.trace = self.last_error.trace + clean_line
else:
diff --git a/bin/nova-manage b/bin/nova-manage
index 75d74903c..807753a2e 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -662,8 +662,9 @@ class NetworkCommands(object):
# check for certain required inputs
if not label:
raise exception.NetworkNotCreated(req='--label')
- if not fixed_range_v4:
- raise exception.NetworkNotCreated(req='--fixed_range_v4')
+ if not (fixed_range_v4 or fixed_range_v6):
+ req = '--fixed_range_v4 or --fixed_range_v6'
+ raise exception.NetworkNotCreated(req=req)
bridge = bridge or FLAGS.flat_network_bridge
if not bridge:
@@ -689,16 +690,6 @@ class NetworkCommands(object):
if FLAGS.network_manager in interface_required:
raise exception.NetworkNotCreated(req='--bridge_interface')
- if FLAGS.use_ipv6:
- fixed_range_v6 = fixed_range_v6 or FLAGS.fixed_range_v6
- if not fixed_range_v6:
- raise exception.NetworkNotCreated(req='with use_ipv6, '
- '--fixed_range_v6')
- gateway_v6 = gateway_v6 or FLAGS.gateway_v6
- if not gateway_v6:
- raise exception.NetworkNotCreated(req='with use_ipv6, '
- '--gateway_v6')
-
# sanitize other input using FLAGS if necessary
if not num_networks:
num_networks = FLAGS.num_networks
@@ -735,8 +726,8 @@ class NetworkCommands(object):
def list(self):
"""List all created networks"""
print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % (
- _('network'),
- _('netmask'),
+ _('IPv4'),
+ _('IPv6'),
_('start address'),
_('DNS1'),
_('DNS2'),
@@ -745,7 +736,7 @@ class NetworkCommands(object):
for network in db.network_get_all(context.get_admin_context()):
print "%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" % (
network.cidr,
- network.netmask,
+ network.cidr_v6,
network.dhcp_start,
network.dns1,
network.dns2,
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 1aef3a255..4d5aec445 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -18,7 +18,7 @@
# under the License.
"""
- Twisted daemon for nova objectstore. Supports S3 API.
+ Daemon for nova objectstore. Supports S3 API.
"""
import gettext
diff --git a/contrib/nova.sh b/contrib/nova.sh
index eab680580..7994e5133 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -75,7 +75,7 @@ if [ "$CMD" == "install" ]; then
sudo modprobe kvm
sudo /etc/init.d/libvirt-bin restart
sudo modprobe nbd
- sudo apt-get install -y python-twisted python-mox python-ipy python-paste
+ sudo apt-get install -y python-mox python-ipy python-paste
sudo apt-get install -y python-migrate python-gflags python-greenlet
sudo apt-get install -y python-libvirt python-libxml2 python-routes
sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet
diff --git a/doc/source/api/autoindex.rst b/doc/source/api/autoindex.rst
index 329a465db..d99d16eaa 100644
--- a/doc/source/api/autoindex.rst
+++ b/doc/source/api/autoindex.rst
@@ -26,7 +26,6 @@
nova..compute.api.rst
nova..compute.instance_types.rst
nova..compute.manager.rst
- nova..compute.monitor.rst
nova..compute.power_state.rst
nova..console.api.rst
nova..console.fake.rst
@@ -115,13 +114,11 @@
nova..tests.test_scheduler.rst
nova..tests.test_service.rst
nova..tests.test_test.rst
- nova..tests.test_twistd.rst
nova..tests.test_utils.rst
nova..tests.test_virt.rst
nova..tests.test_volume.rst
nova..tests.test_xenapi.rst
nova..tests.xenapi.stubs.rst
- nova..twistd.rst
nova..utils.rst
nova..version.rst
nova..virt.connection.rst
diff --git a/doc/source/api/nova..compute.monitor.rst b/doc/source/api/nova..compute.monitor.rst
deleted file mode 100644
index a91169ecd..000000000
--- a/doc/source/api/nova..compute.monitor.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..compute.monitor` Module
-==============================================================================
-.. automodule:: nova..compute.monitor
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..tests.test_twistd.rst b/doc/source/api/nova..tests.test_twistd.rst
deleted file mode 100644
index cae0c0a28..000000000
--- a/doc/source/api/nova..tests.test_twistd.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..tests.test_twistd` Module
-==============================================================================
-.. automodule:: nova..tests.test_twistd
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/api/nova..twistd.rst b/doc/source/api/nova..twistd.rst
deleted file mode 100644
index d4145396d..000000000
--- a/doc/source/api/nova..twistd.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-The :mod:`nova..twistd` Module
-==============================================================================
-.. automodule:: nova..twistd
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/code.rst b/doc/source/code.rst
index 6b8d5661f..73fc31e1a 100644
--- a/doc/source/code.rst
+++ b/doc/source/code.rst
@@ -21,7 +21,6 @@ Generating source/api/nova..cloudpipe.pipelib.rst
Generating source/api/nova..compute.disk.rst
Generating source/api/nova..compute.instance_types.rst
Generating source/api/nova..compute.manager.rst
-Generating source/api/nova..compute.monitor.rst
Generating source/api/nova..compute.power_state.rst
Generating source/api/nova..context.rst
Generating source/api/nova..crypto.rst
@@ -79,11 +78,9 @@ Generating source/api/nova..tests.rpc_unittest.rst
Generating source/api/nova..tests.runtime_flags.rst
Generating source/api/nova..tests.scheduler_unittest.rst
Generating source/api/nova..tests.service_unittest.rst
-Generating source/api/nova..tests.twistd_unittest.rst
Generating source/api/nova..tests.validator_unittest.rst
Generating source/api/nova..tests.virt_unittest.rst
Generating source/api/nova..tests.volume_unittest.rst
-Generating source/api/nova..twistd.rst
Generating source/api/nova..utils.rst
Generating source/api/nova..validate.rst
Generating source/api/nova..virt.connection.rst
diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst
index 233cd6f08..7f44ecdf2 100644
--- a/doc/source/devref/architecture.rst
+++ b/doc/source/devref/architecture.rst
@@ -45,7 +45,7 @@ Below you will find a helpful explanation of the different components.
* Web Dashboard: potential external component that talks to the api
* api: component that receives http requests, converts commands and communicates with other components via the queue or http (in the case of objectstore)
* Auth Manager: component responsible for users/projects/and roles. Can backend to DB or LDAP. This is not a separate binary, but rather a python class that is used by most components in the system.
-* objectstore: twisted http server that replicates s3 api and allows storage and retrieval of images
+* objectstore: http server that replicates s3 api and allows storage and retrieval of images
* scheduler: decides which host gets each vm and volume
* volume: manages dynamically attachable block devices.
* network: manages ip forwarding, bridges, and vlans
diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst
index 31cc2037f..50397cbec 100644
--- a/doc/source/devref/compute.rst
+++ b/doc/source/devref/compute.rst
@@ -118,19 +118,6 @@ The :mod:`nova.virt.fake` Driver
:show-inheritance:
-Monitoring
-----------
-
-The :mod:`nova.compute.monitor` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.compute.monitor
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
Tests
-----
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
index f3c454d64..09f1eb2c2 100644
--- a/doc/source/devref/development.environment.rst
+++ b/doc/source/devref/development.environment.rst
@@ -51,7 +51,7 @@ To activate the Nova virtualenv for the extent of your current shell session
Also, make test will automatically use the virtualenv.
-If you don't want to create a virtualenv every time you branch (which takes a while as long as we have the large Twisted project as a dependency) you can reuse a single virtualenv for all branches.
+If you don't want to create a virtualenv every time you branch you can reuse a single virtualenv for all branches.
#. If you don't have a nova/ directory containing trunk/ and other branches, do so now.
#. Go into nova/trunk and install a virtualenv.
diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst
index 093fbb3ee..beca99ecd 100644
--- a/doc/source/devref/nova.rst
+++ b/doc/source/devref/nova.rst
@@ -102,16 +102,6 @@ The :mod:`nova.test` Module
:show-inheritance:
-The :mod:`nova.twistd` Module
------------------------------
-
-.. automodule:: nova.twistd
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
The :mod:`nova.utils` Module
----------------------------
@@ -215,16 +205,6 @@ The :mod:`runtime_flags` Module
:show-inheritance:
-The :mod:`twistd_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.twistd_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
The :mod:`validator_unittest` Module
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/nova/api/direct.py b/nova/api/direct.py
index ec79151b1..993815fc7 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -107,7 +107,8 @@ class DelegatedAuthMiddleware(wsgi.Middleware):
def process_request(self, request):
os_user = request.headers['X-OpenStack-User']
os_project = request.headers['X-OpenStack-Project']
- context_ref = context.RequestContext(user=os_user, project=os_project)
+ context_ref = context.RequestContext(user_id=os_user,
+ project_id=os_project)
request.environ['openstack.context'] = context_ref
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index cf1734281..af232edda 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -66,7 +66,7 @@ class RequestLogging(wsgi.Middleware):
else:
controller = None
action = None
- ctxt = request.environ.get('ec2.context', None)
+ ctxt = request.environ.get('nova.context', None)
delta = utils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
@@ -139,8 +139,7 @@ class Lockout(wsgi.Middleware):
class Authenticate(wsgi.Middleware):
-
- """Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""
+ """Authenticate an EC2 request and add 'nova.context' to WSGI environ."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
@@ -157,8 +156,9 @@ class Authenticate(wsgi.Middleware):
auth_params.pop('Signature')
# Authenticate the request.
+ authman = manager.AuthManager()
try:
- (user, project) = manager.AuthManager().authenticate(
+ (user, project) = authman.authenticate(
access,
signature,
auth_params,
@@ -174,14 +174,17 @@ class Authenticate(wsgi.Middleware):
remote_address = req.remote_addr
if FLAGS.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
- ctxt = context.RequestContext(user=user,
- project=project,
+ roles = authman.get_active_roles(user, project)
+ ctxt = context.RequestContext(user_id=user.id,
+ project_id=project.id,
+ is_admin=user.is_admin(),
+ roles=roles,
remote_address=remote_address)
- req.environ['ec2.context'] = ctxt
+ req.environ['nova.context'] = ctxt
uname = user.name
pname = project.name
msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals()
- LOG.audit(msg, context=req.environ['ec2.context'])
+ LOG.audit(msg, context=req.environ['nova.context'])
return self.application
@@ -228,7 +231,7 @@ class Authorizer(wsgi.Middleware):
"""Authorize an EC2 API request.
Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
- executed in ec2.context.
+ executed in nova.context.
"""
def __init__(self, application):
@@ -282,7 +285,7 @@ class Authorizer(wsgi.Middleware):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- context = req.environ['ec2.context']
+ context = req.environ['nova.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
action = req.environ['ec2.request'].action
allowed_roles = self.action_roles[controller].get(action, ['none'])
@@ -295,28 +298,27 @@ class Authorizer(wsgi.Middleware):
def _matches_any_role(self, context, roles):
"""Return True if any role in roles is allowed in context."""
- if context.user.is_superuser():
+ if context.is_admin:
return True
if 'all' in roles:
return True
if 'none' in roles:
return False
- return any(context.project.has_role(context.user_id, role)
- for role in roles)
+ return any(role in context.roles for role in roles)
class Executor(wsgi.Application):
"""Execute an EC2 API request.
- Executes 'ec2.action' upon 'ec2.controller', passing 'ec2.context' and
+ Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
response, or a 400 upon failure.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- context = req.environ['ec2.context']
+ context = req.environ['nova.context']
api_request = req.environ['ec2.request']
result = None
try:
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 868b98a31..9ab8aeb58 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -40,6 +40,7 @@ from nova.api.openstack import servers
from nova.api.openstack import server_metadata
from nova.api.openstack import shared_ip_groups
from nova.api.openstack import users
+from nova.api.openstack import versions
from nova.api.openstack import wsgi
from nova.api.openstack import zones
@@ -96,6 +97,7 @@ class APIRouter(base_wsgi.Router):
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
server_members['rescue'] = 'POST'
+ server_members['migrate'] = 'POST'
server_members['unrescue'] = 'POST'
server_members['reset_network'] = 'POST'
server_members['inject_network_info'] = 'POST'
@@ -115,6 +117,10 @@ class APIRouter(base_wsgi.Router):
'select': 'POST',
'boot': 'POST'})
+ mapper.connect("versions", "/",
+ controller=versions.create_resource(version),
+ action='show')
+
mapper.resource("console", "consoles",
controller=consoles.create_resource(),
parent_resource=dict(member_name='server',
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 7c3e683d6..d42abe1f8 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -48,31 +48,35 @@ class AuthMiddleware(wsgi.Middleware):
def __call__(self, req):
if not self.has_authentication(req):
return self.authenticate(req)
- user = self.get_user_by_authentication(req)
- if not user:
+ user_id = self.get_user_by_authentication(req)
+ if not user_id:
token = req.headers["X-Auth-Token"]
- msg = _("%(user)s could not be found with token '%(token)s'")
+ msg = _("%(user_id)s could not be found with token '%(token)s'")
LOG.warn(msg % locals())
return faults.Fault(webob.exc.HTTPUnauthorized())
try:
- account = req.headers["X-Auth-Project-Id"]
+ project_id = req.headers["X-Auth-Project-Id"]
except KeyError:
# FIXME(usrleon): It needed only for compatibility
# while osapi clients don't use this header
- accounts = self.auth.get_projects(user=user)
- if accounts:
- account = accounts[0]
+ projects = self.auth.get_projects(user_id)
+ if projects:
+ project_id = projects[0].id
else:
return faults.Fault(webob.exc.HTTPUnauthorized())
- if not self.auth.is_admin(user) and \
- not self.auth.is_project_member(user, account):
- msg = _("%(user)s must be an admin or a member of %(account)s")
+ is_admin = self.auth.is_admin(user_id)
+ req.environ['nova.context'] = context.RequestContext(user_id,
+ project_id,
+ is_admin)
+ if not is_admin and not self.auth.is_project_member(user_id,
+ project_id):
+ msg = _("%(user_id)s must be an admin or a "
+ "member of %(project_id)s")
LOG.warn(msg % locals())
return faults.Fault(webob.exc.HTTPUnauthorized())
- req.environ['nova.context'] = context.RequestContext(user, account)
return self.application
def has_authentication(self, req):
@@ -133,7 +137,7 @@ class AuthMiddleware(wsgi.Middleware):
if delta.days >= 2:
self.db.auth_token_destroy(ctxt, token['token_hash'])
else:
- return self.auth.get_user(token['user_id'])
+ return token['user_id']
return None
def _authorize_user(self, username, key, req):
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index bd14a1389..efa4ab385 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -17,12 +17,14 @@
import re
from urlparse import urlparse
+from xml.dom import minidom
import webob
from nova import exception
from nova import flags
from nova import log as logging
+from nova.api.openstack import wsgi
LOG = logging.getLogger('nova.api.openstack.common')
@@ -192,3 +194,83 @@ def get_version_from_href(href):
except IndexError:
version = '1.0'
return version
+
+
+class MetadataXMLDeserializer(wsgi.MetadataXMLDeserializer):
+
+ def _extract_metadata_container(self, datastring):
+ dom = minidom.parseString(datastring)
+ metadata_node = self.find_first_child_named(dom, "metadata")
+ metadata = self.extract_metadata(metadata_node)
+ return {'body': {'metadata': metadata}}
+
+ def create(self, datastring):
+ return self._extract_metadata_container(datastring)
+
+ def update_all(self, datastring):
+ return self._extract_metadata_container(datastring)
+
+ def update(self, datastring):
+ dom = minidom.parseString(datastring)
+ metadata_item = self.extract_metadata(dom)
+ return {'body': {'meta': metadata_item}}
+
+
+class MetadataHeadersSerializer(wsgi.ResponseHeadersSerializer):
+
+ def delete(self, response, data):
+ response.status_int = 204
+
+
+class MetadataXMLSerializer(wsgi.XMLDictSerializer):
+ def __init__(self, xmlns=wsgi.XMLNS_V11):
+ super(MetadataXMLSerializer, self).__init__(xmlns=xmlns)
+
+ def _meta_item_to_xml(self, doc, key, value):
+ node = doc.createElement('meta')
+ doc.appendChild(node)
+ node.setAttribute('key', '%s' % key)
+ text = doc.createTextNode('%s' % value)
+ node.appendChild(text)
+ return node
+
+ def meta_list_to_xml(self, xml_doc, meta_items):
+ container_node = xml_doc.createElement('metadata')
+ for (key, value) in meta_items:
+ item_node = self._meta_item_to_xml(xml_doc, key, value)
+ container_node.appendChild(item_node)
+ return container_node
+
+ def _meta_list_to_xml_string(self, metadata_dict):
+ xml_doc = minidom.Document()
+ items = metadata_dict['metadata'].items()
+ container_node = self.meta_list_to_xml(xml_doc, items)
+ xml_doc.appendChild(container_node)
+ self._add_xmlns(container_node)
+ return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
+
+ def index(self, metadata_dict):
+ return self._meta_list_to_xml_string(metadata_dict)
+
+ def create(self, metadata_dict):
+ return self._meta_list_to_xml_string(metadata_dict)
+
+ def update_all(self, metadata_dict):
+ return self._meta_list_to_xml_string(metadata_dict)
+
+ def _meta_item_to_xml_string(self, meta_item_dict):
+ xml_doc = minidom.Document()
+ item_key, item_value = meta_item_dict.items()[0]
+ item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
+ xml_doc.appendChild(item_node)
+ self._add_xmlns(item_node)
+ return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
+
+ def show(self, meta_item_dict):
+ return self._meta_item_to_xml_string(meta_item_dict['meta'])
+
+ def update(self, meta_item_dict):
+ return self._meta_item_to_xml_string(meta_item_dict['meta'])
+
+ def default(self, *args, **kwargs):
+ return ''
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
index b4a211857..3d8049324 100644
--- a/nova/api/openstack/contrib/floating_ips.py
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -27,9 +27,9 @@ from nova.api.openstack import extensions
def _translate_floating_ip_view(floating_ip):
result = {'id': floating_ip['id'],
'ip': floating_ip['address']}
- if 'fixed_ip' in floating_ip:
+ try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
- else:
+ except (TypeError, KeyError):
result['fixed_ip'] = None
if 'instance' in floating_ip:
result['instance_id'] = floating_ip['instance']['id']
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index f8317565e..a2d18d37e 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -20,6 +20,7 @@ import webob
from webob import exc
from xml.dom import minidom
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -29,7 +30,6 @@ from nova import utils
from nova.compute import instance_types
from nova.api.openstack import wsgi
-from nova.auth import manager as auth_manager
LOG = logging.getLogger('nova.api.openstack.create_instance_helper')
@@ -80,13 +80,21 @@ class CreateInstanceHelper(object):
key_name = None
key_data = None
- key_pairs = auth_manager.AuthManager.get_key_pairs(context)
+ # TODO(vish): Key pair access should move into a common library
+ # instead of being accessed directly from the db.
+ key_pairs = db.key_pair_get_all_by_user(context.elevated(),
+ context.user_id)
if key_pairs:
key_pair = key_pairs[0]
key_name = key_pair['name']
key_data = key_pair['public_key']
image_href = self.controller._image_ref_from_req_data(body)
+ # If the image href was generated by nova api, strip image_href
+ # down to an id and use the default glance connection params
+
+ if str(image_href).startswith(req.application_url):
+ image_href = image_href.split('/').pop()
try:
image_service, image_id = nova.image.get_image_service(image_href)
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
@@ -188,7 +196,7 @@ class CreateInstanceHelper(object):
Overrides normal behavior in the case of xml content
"""
if request.content_type == "application/xml":
- deserializer = ServerCreateRequestXMLDeserializer()
+ deserializer = ServerXMLDeserializer()
return deserializer.deserialize(request.body)
else:
return self._deserialize(request.body, request.get_content_type())
@@ -293,6 +301,37 @@ class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer):
and personality attributes
"""
+ def action(self, string):
+ dom = minidom.parseString(string)
+ action_node = dom.childNodes[0]
+ action_name = action_node.tagName
+
+ action_deserializer = {
+ 'createImage': self._action_create_image,
+ 'createBackup': self._action_create_backup,
+ }.get(action_name, self.default)
+
+ action_data = action_deserializer(action_node)
+
+ return {'body': {action_name: action_data}}
+
+ def _action_create_image(self, node):
+ return self._deserialize_image_action(node, ('name',))
+
+ def _action_create_backup(self, node):
+ attributes = ('name', 'backup_type', 'rotation')
+ return self._deserialize_image_action(node, attributes)
+
+ def _deserialize_image_action(self, node, allowed_attributes):
+ data = {}
+ for attribute in allowed_attributes:
+ value = node.getAttribute(attribute)
+ if value:
+ data[attribute] = value
+ metadata_node = self.find_first_child_named(node, 'metadata')
+ data['metadata'] = self.extract_metadata(metadata_node)
+ return data
+
def create(self, string):
"""Deserialize an xml-formatted server create request"""
dom = minidom.parseString(string)
@@ -303,29 +342,29 @@ class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer):
"""Marshal the server attribute of a parsed request"""
server = {}
server_node = self.find_first_child_named(node, 'server')
- for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
+
+ attributes = ["name", "imageId", "flavorId", "imageRef",
+ "flavorRef", "adminPass"]
+ for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
+
metadata_node = self.find_first_child_named(server_node, "metadata")
- metadata = self.extract_metadata(metadata_node)
- if metadata is not None:
- server["metadata"] = metadata
- personality = self._extract_personality(server_node)
- if personality is not None:
- server["personality"] = personality
+ server["metadata"] = self.extract_metadata(metadata_node)
+
+ server["personality"] = self._extract_personality(server_node)
+
return server
def _extract_personality(self, server_node):
"""Marshal the personality attribute of a parsed request"""
- personality_node = \
- self.find_first_child_named(server_node, "personality")
- if personality_node is None:
- return None
+ node = self.find_first_child_named(server_node, "personality")
personality = []
- for file_node in self.find_children_named(personality_node, "file"):
- item = {}
- if file_node.hasAttribute("path"):
- item["path"] = file_node.getAttribute("path")
- item["contents"] = self.extract_text(file_node)
- personality.append(item)
+ if node is not None:
+ for file_node in self.find_children_named(node, "file"):
+ item = {}
+ if file_node.hasAttribute("path"):
+ item["path"] = file_node.getAttribute("path")
+ item["contents"] = self.extract_text(file_node)
+ personality.append(item)
return personality
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index ee181c924..aaf64a123 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -16,12 +16,12 @@
# under the License.
from webob import exc
-from xml.dom import minidom
from nova import flags
from nova import image
from nova import quota
from nova import utils
+from nova.api.openstack import common
from nova.api.openstack import wsgi
@@ -118,95 +118,15 @@ class Controller(object):
self.image_service.update(context, image_id, img, None)
-class ImageMetadataXMLDeserializer(wsgi.MetadataXMLDeserializer):
-
- def _extract_metadata_container(self, datastring):
- dom = minidom.parseString(datastring)
- metadata_node = self.find_first_child_named(dom, "metadata")
- metadata = self.extract_metadata(metadata_node)
- return {'body': {'metadata': metadata}}
-
- def create(self, datastring):
- return self._extract_metadata_container(datastring)
-
- def update_all(self, datastring):
- return self._extract_metadata_container(datastring)
-
- def update(self, datastring):
- dom = minidom.parseString(datastring)
- metadata_item = self.extract_metadata(dom)
- return {'body': {'meta': metadata_item}}
-
-
-class HeadersSerializer(wsgi.ResponseHeadersSerializer):
-
- def delete(self, response, data):
- response.status_int = 204
-
-
-class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
- def __init__(self, xmlns=wsgi.XMLNS_V11):
- super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
-
- def _meta_item_to_xml(self, doc, key, value):
- node = doc.createElement('meta')
- doc.appendChild(node)
- node.setAttribute('key', '%s' % key)
- text = doc.createTextNode('%s' % value)
- node.appendChild(text)
- return node
-
- def meta_list_to_xml(self, xml_doc, meta_items):
- container_node = xml_doc.createElement('metadata')
- for (key, value) in meta_items:
- item_node = self._meta_item_to_xml(xml_doc, key, value)
- container_node.appendChild(item_node)
- return container_node
-
- def _meta_list_to_xml_string(self, metadata_dict):
- xml_doc = minidom.Document()
- items = metadata_dict['metadata'].items()
- container_node = self.meta_list_to_xml(xml_doc, items)
- xml_doc.appendChild(container_node)
- self._add_xmlns(container_node)
- return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
-
- def index(self, metadata_dict):
- return self._meta_list_to_xml_string(metadata_dict)
-
- def create(self, metadata_dict):
- return self._meta_list_to_xml_string(metadata_dict)
-
- def update_all(self, metadata_dict):
- return self._meta_list_to_xml_string(metadata_dict)
-
- def _meta_item_to_xml_string(self, meta_item_dict):
- xml_doc = minidom.Document()
- item_key, item_value = meta_item_dict.items()[0]
- item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
- xml_doc.appendChild(item_node)
- self._add_xmlns(item_node)
- return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
-
- def show(self, meta_item_dict):
- return self._meta_item_to_xml_string(meta_item_dict['meta'])
-
- def update(self, meta_item_dict):
- return self._meta_item_to_xml_string(meta_item_dict['meta'])
-
- def default(self, *args, **kwargs):
- return ''
-
-
def create_resource():
- headers_serializer = HeadersSerializer()
+ headers_serializer = common.MetadataHeadersSerializer()
body_deserializers = {
- 'application/xml': ImageMetadataXMLDeserializer(),
+ 'application/xml': common.MetadataXMLDeserializer(),
}
body_serializers = {
- 'application/xml': ImageMetadataXMLSerializer(),
+ 'application/xml': common.MetadataXMLSerializer(),
}
serializer = wsgi.ResponseSerializer(body_serializers, headers_serializer)
deserializer = wsgi.RequestDeserializer(body_deserializers)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 30e4fd389..0834adfa5 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -98,79 +98,34 @@ class Controller(object):
self._image_service.delete(context, id)
return webob.exc.HTTPNoContent()
- def create(self, req, body):
- """Snapshot or backup a server instance and save the image.
-
- Images now have an `image_type` associated with them, which can be
- 'snapshot' or the backup type, like 'daily' or 'weekly'.
-
- If the image_type is backup-like, then the rotation factor can be
- included and that will cause the oldest backups that exceed the
- rotation factor to be deleted.
-
- :param req: `wsgi.Request` object
- """
- def get_param(param):
- try:
- return body["image"][param]
- except KeyError:
- raise webob.exc.HTTPBadRequest(explanation="Missing required "
- "param: %s" % param)
-
- context = req.environ['nova.context']
- content_type = req.get_content_type()
-
- if not body:
- raise webob.exc.HTTPBadRequest()
-
- image_type = body["image"].get("image_type", "snapshot")
-
- try:
- server_id = self._server_id_from_req(req, body)
- except KeyError:
- raise webob.exc.HTTPBadRequest()
-
- image_name = get_param("name")
- props = self._get_extra_properties(req, body)
-
- if image_type == "snapshot":
- image = self._compute_service.snapshot(
- context, server_id, image_name,
- extra_properties=props)
- elif image_type == "backup":
- # NOTE(sirp): Unlike snapshot, backup is not a customer facing
- # API call; rather, it's used by the internal backup scheduler
- if not FLAGS.allow_admin_api:
- raise webob.exc.HTTPBadRequest(
- explanation="Admin API Required")
-
- backup_type = get_param("backup_type")
- rotation = int(get_param("rotation"))
-
- image = self._compute_service.backup(
- context, server_id, image_name,
- backup_type, rotation, extra_properties=props)
- else:
- LOG.error(_("Invalid image_type '%s' passed") % image_type)
- raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: "
- "%s" % image_type)
-
- return dict(image=self.get_builder(req).build(image, detail=True))
-
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
raise NotImplementedError()
- def _server_id_from_req(self, req, data):
- raise NotImplementedError()
-
- def _get_extra_properties(self, req, data):
- return {}
-
class ControllerV10(Controller):
"""Version 1.0 specific controller logic."""
+ def create(self, req, body):
+ """Snapshot a server instance and save the image."""
+ try:
+ image = body["image"]
+ except (KeyError, TypeError):
+ msg = _("Invalid image entity")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ image_name = image["name"]
+ server_id = image["serverId"]
+ except KeyError as missing_key:
+ msg = _("Image entity requires %s") % missing_key
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ context = req.environ["nova.context"]
+ image = self._compute_service.snapshot(context, server_id, image_name)
+
+ return dict(image=self.get_builder(req).build(image, detail=True))
+
def get_builder(self, request):
"""Property to get the ViewBuilder class we need to use."""
base_url = request.application_url
@@ -202,13 +157,6 @@ class ControllerV10(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req(self, req, data):
- try:
- return data['image']['serverId']
- except KeyError:
- msg = _("Expected serverId attribute on server entity.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
class ControllerV11(Controller):
"""Version 1.1 specific controller logic."""
@@ -246,37 +194,8 @@ class ControllerV11(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req(self, req, data):
- try:
- server_ref = data['image']['serverRef']
- except KeyError:
- msg = _("Expected serverRef attribute on server entity.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- if not server_ref.startswith('http'):
- return server_ref
-
- passed = urlparse.urlparse(server_ref)
- expected = urlparse.urlparse(req.application_url)
- version = expected.path.split('/')[1]
- expected_prefix = "/%s/servers/" % version
- _empty, _sep, server_id = passed.path.partition(expected_prefix)
- scheme_ok = passed.scheme == expected.scheme
- host_ok = passed.hostname == expected.hostname
- port_ok = (passed.port == expected.port or
- passed.port == FLAGS.osapi_port)
- if not (scheme_ok and port_ok and host_ok and server_id):
- msg = _("serverRef must match request url")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- return server_id
-
- def _get_extra_properties(self, req, data):
- server_ref = data['image']['serverRef']
- if not server_ref.startswith('http'):
- server_ref = os.path.join(req.application_url, 'servers',
- server_ref)
- return {'instance_ref': server_ref}
+ def create(self, *args, **kwargs):
+ raise webob.exc.HTTPMethodNotAllowed()
class ImageXMLSerializer(wsgi.XMLDictSerializer):
@@ -284,7 +203,7 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer):
xmlns = wsgi.XMLNS_V11
def __init__(self):
- self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer()
+ self.metadata_serializer = common.MetadataXMLSerializer()
def _image_to_xml(self, xml_doc, image):
image_node = xml_doc.createElement('image')
@@ -369,12 +288,6 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer):
image_dict['image'])
return self.to_xml_string(node, True)
- def create(self, image_dict):
- xml_doc = minidom.Document()
- node = self._image_to_xml_detailed(xml_doc,
- image_dict['image'])
- return self.to_xml_string(node, True)
-
def create_resource(version='1.0'):
controller = {
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index d7cabb067..002b47edb 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -14,26 +14,29 @@
# under the License.
import base64
+import os
import traceback
from webob import exc
+from xml.dom import minidom
import webob
from nova import compute
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.api.openstack import common
from nova.api.openstack import create_instance_helper as helper
+from nova.api.openstack import ips
+from nova.api.openstack import wsgi
+from nova.compute import instance_types
+from nova.scheduler import api as scheduler_api
+import nova.api.openstack
import nova.api.openstack.views.addresses
import nova.api.openstack.views.flavors
import nova.api.openstack.views.images
import nova.api.openstack.views.servers
-from nova.api.openstack import wsgi
-import nova.api.openstack
-from nova.scheduler import api as scheduler_api
LOG = logging.getLogger('nova.api.openstack.servers')
@@ -152,23 +155,94 @@ class Controller(object):
@scheduler_api.redirect_handler
def action(self, req, id, body):
- """Multi-purpose method used to reboot, rebuild, or
- resize a server"""
+ """Multi-purpose method used to take actions on a server"""
- actions = {
+ self.actions = {
'changePassword': self._action_change_password,
'reboot': self._action_reboot,
'resize': self._action_resize,
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
'rebuild': self._action_rebuild,
- 'migrate': self._action_migrate}
+ 'createImage': self._action_create_image,
+ }
- for key in actions.keys():
+ if FLAGS.allow_admin_api:
+ admin_actions = {
+ 'createBackup': self._action_create_backup,
+ }
+ self.actions.update(admin_actions)
+
+ for key in self.actions.keys():
if key in body:
- return actions[key](body, req, id)
+ return self.actions[key](body, req, id)
+
raise exc.HTTPNotImplemented()
+ def _action_create_backup(self, input_dict, req, instance_id):
+ """Backup a server instance.
+
+ Images now have an `image_type` associated with them, which can be
+ 'snapshot' or the backup type, like 'daily' or 'weekly'.
+
+ If the image_type is backup-like, then the rotation factor can be
+ included and that will cause the oldest backups that exceed the
+ rotation factor to be deleted.
+
+ """
+ entity = input_dict["createBackup"]
+
+ try:
+ image_name = entity["name"]
+ backup_type = entity["backup_type"]
+ rotation = entity["rotation"]
+
+ except KeyError as missing_key:
+ msg = _("createBackup entity requires %s attribute") % missing_key
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ except TypeError:
+ msg = _("Malformed createBackup entity")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ rotation = int(rotation)
+ except ValueError:
+ msg = _("createBackup attribute 'rotation' must be an integer")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ # preserve link to server in image properties
+ server_ref = os.path.join(req.application_url,
+ 'servers',
+ str(instance_id))
+ props = {'instance_ref': server_ref}
+
+ metadata = entity.get('metadata', {})
+ try:
+ props.update(metadata)
+ except ValueError:
+ msg = _("Invalid metadata")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ context = req.environ["nova.context"]
+ image = self.compute_api.backup(context,
+ instance_id,
+ image_name,
+ backup_type,
+ rotation,
+ extra_properties=props)
+
+ # build location of newly-created image entity
+ image_id = str(image['id'])
+ image_ref = os.path.join(req.application_url, 'images', image_id)
+
+ resp = webob.Response(status_int=202)
+ resp.headers['Location'] = image_ref
+ return resp
+
+ def _action_create_image(self, input_dict, req, id):
+ return exc.HTTPNotImplemented()
+
def _action_change_password(self, input_dict, req, id):
return exc.HTTPNotImplemented()
@@ -206,14 +280,6 @@ class Controller(object):
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
- def _action_migrate(self, input_dict, req, id):
- try:
- self.compute_api.resize(req.environ['nova.context'], id)
- except Exception, e:
- LOG.exception(_("Error in migrate %s"), e)
- raise exc.HTTPBadRequest()
- return webob.Response(status_int=202)
-
@scheduler_api.redirect_handler
def lock(self, req, id):
"""
@@ -340,6 +406,15 @@ class Controller(object):
return webob.Response(status_int=202)
@scheduler_api.redirect_handler
+ def migrate(self, req, id):
+ try:
+ self.compute_api.resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in migrate %s"), e)
+ raise exc.HTTPBadRequest()
+ return webob.Response(status_int=202)
+
+ @scheduler_api.redirect_handler
def rescue(self, req, id):
"""Permit users to rescue the server."""
context = req.environ["nova.context"]
@@ -403,6 +478,24 @@ class Controller(object):
error=item.error))
return dict(actions=actions)
+ def resize(self, req, instance_id, flavor_id):
+ """Begin the resize process with given instance/flavor."""
+ context = req.environ["nova.context"]
+
+ try:
+ self.compute_api.resize(context, instance_id, flavor_id)
+ except exception.FlavorNotFound:
+ msg = _("Unable to locate requested flavor.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except exception.CannotResizeToSameSize:
+ msg = _("Resize requires a change in size.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except exception.CannotResizeToSmallerSize:
+ msg = _("Resizing to a smaller size is not supported.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ return webob.Response(status_int=202)
+
class ControllerV10(Controller):
@@ -436,14 +529,13 @@ class ControllerV10(Controller):
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
- if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
- flavor_id = input_dict['resize']['flavorId']
- self.compute_api.resize(req.environ['nova.context'], id,
- flavor_id)
- else:
- LOG.exception(_("Missing 'flavorId' argument for resize"))
- raise exc.HTTPUnprocessableEntity()
- return webob.Response(status_int=202)
+ try:
+ flavor_id = input_dict["resize"]["flavorId"]
+ except (KeyError, TypeError):
+ msg = _("Resize requests require 'flavorId' attribute.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ return self.resize(req, id, flavor_id)
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
@@ -480,11 +572,20 @@ class ControllerV11(Controller):
raise exc.HTTPNotFound()
def _image_ref_from_req_data(self, data):
- return data['server']['imageRef']
+ try:
+ return data['server']['imageRef']
+ except (TypeError, KeyError):
+ msg = _("Missing imageRef attribute")
+ raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
- href = data['server']['flavorRef']
- return common.get_id_from_href(href)
+ try:
+ flavor_ref = data['server']['flavorRef']
+ except (TypeError, KeyError):
+ msg = _("Missing flavorRef attribute")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ return common.get_id_from_href(flavor_ref)
def _build_view(self, req, instance, is_detail=False):
base_url = req.application_url
@@ -544,18 +645,12 @@ class ControllerV11(Controller):
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
try:
- if 'resize' in input_dict and 'flavorRef' in input_dict['resize']:
- flavor_ref = input_dict['resize']['flavorRef']
- flavor_id = common.get_id_from_href(flavor_ref)
- self.compute_api.resize(req.environ['nova.context'], id,
- flavor_id)
- else:
- LOG.exception(_("Missing 'flavorRef' argument for resize"))
- raise exc.HTTPUnprocessableEntity()
- except Exception, e:
- LOG.exception(_("Error in resize %s"), e)
- raise exc.HTTPBadRequest()
- return webob.Response(status_int=202)
+ flavor_ref = input_dict["resize"]["flavorRef"]
+ except (KeyError, TypeError):
+ msg = _("Resize requests require 'flavorRef' attribute.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ return self.resize(req, id, flavor_ref)
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
@@ -585,6 +680,48 @@ class ControllerV11(Controller):
return webob.Response(status_int=202)
+ def _action_create_image(self, input_dict, req, instance_id):
+ """Snapshot a server instance."""
+ entity = input_dict.get("createImage", {})
+
+ try:
+ image_name = entity["name"]
+
+ except KeyError:
+ msg = _("createImage entity requires name attribute")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ except TypeError:
+ msg = _("Malformed createImage entity")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ # preserve link to server in image properties
+ server_ref = os.path.join(req.application_url,
+ 'servers',
+ str(instance_id))
+ props = {'instance_ref': server_ref}
+
+ metadata = entity.get('metadata', {})
+ try:
+ props.update(metadata)
+ except ValueError:
+ msg = _("Invalid metadata")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ context = req.environ['nova.context']
+ image = self.compute_api.snapshot(context,
+ instance_id,
+ image_name,
+ extra_properties=props)
+
+ # build location of newly-created image entity
+ image_id = str(image['id'])
+ image_ref = os.path.join(req.application_url, 'images', image_id)
+
+ resp = webob.Response(status_int=202)
+ resp.headers['Location'] = image_ref
+ return resp
+
def get_default_xmlns(self, req):
return common.XML_NS_V11
@@ -599,6 +736,123 @@ class HeadersSerializer(wsgi.ResponseHeadersSerializer):
response.status_int = 204
+class ServerXMLSerializer(wsgi.XMLDictSerializer):
+
+ xmlns = wsgi.XMLNS_V11
+
+ def __init__(self):
+ self.metadata_serializer = common.MetadataXMLSerializer()
+ self.addresses_serializer = ips.IPXMLSerializer()
+
+ def _create_basic_entity_node(self, xml_doc, id, links, name):
+ basic_node = xml_doc.createElement(name)
+ basic_node.setAttribute('id', str(id))
+ link_nodes = self._create_link_nodes(xml_doc, links)
+ for link_node in link_nodes:
+ basic_node.appendChild(link_node)
+ return basic_node
+
+ def _create_metadata_node(self, xml_doc, metadata):
+ return self.metadata_serializer.meta_list_to_xml(xml_doc, metadata)
+
+ def _create_addresses_node(self, xml_doc, addresses):
+ return self.addresses_serializer.networks_to_xml(xml_doc, addresses)
+
+ def _add_server_attributes(self, node, server):
+ node.setAttribute('id', str(server['id']))
+ node.setAttribute('uuid', str(server['uuid']))
+ node.setAttribute('hostId', str(server['hostId']))
+ node.setAttribute('name', server['name'])
+ node.setAttribute('created', str(server['created']))
+ node.setAttribute('updated', str(server['updated']))
+ node.setAttribute('status', server['status'])
+ if 'progress' in server:
+ node.setAttribute('progress', str(server['progress']))
+
+ def _server_to_xml(self, xml_doc, server):
+ server_node = xml_doc.createElement('server')
+ server_node.setAttribute('id', str(server['id']))
+ server_node.setAttribute('name', server['name'])
+ link_nodes = self._create_link_nodes(xml_doc,
+ server['links'])
+ for link_node in link_nodes:
+ server_node.appendChild(link_node)
+ return server_node
+
+ def _server_to_xml_detailed(self, xml_doc, server):
+ server_node = xml_doc.createElement('server')
+ self._add_server_attributes(server_node, server)
+
+ link_nodes = self._create_link_nodes(xml_doc,
+ server['links'])
+ for link_node in link_nodes:
+ server_node.appendChild(link_node)
+
+ if 'image' in server:
+ image_node = self._create_basic_entity_node(xml_doc,
+ server['image']['id'],
+ server['image']['links'],
+ 'image')
+ server_node.appendChild(image_node)
+
+ if 'flavor' in server:
+ flavor_node = self._create_basic_entity_node(xml_doc,
+ server['flavor']['id'],
+ server['flavor']['links'],
+ 'flavor')
+ server_node.appendChild(flavor_node)
+
+ metadata = server.get('metadata', {}).items()
+ if len(metadata) > 0:
+ metadata_node = self._create_metadata_node(xml_doc, metadata)
+ server_node.appendChild(metadata_node)
+
+ addresses_node = self._create_addresses_node(xml_doc,
+ server['addresses'])
+ server_node.appendChild(addresses_node)
+
+ return server_node
+
+ def _server_list_to_xml(self, xml_doc, servers, detailed):
+ container_node = xml_doc.createElement('servers')
+ if detailed:
+ server_to_xml = self._server_to_xml_detailed
+ else:
+ server_to_xml = self._server_to_xml
+
+ for server in servers:
+ item_node = server_to_xml(xml_doc, server)
+ container_node.appendChild(item_node)
+ return container_node
+
+ def index(self, servers_dict):
+ xml_doc = minidom.Document()
+ node = self._server_list_to_xml(xml_doc,
+ servers_dict['servers'],
+ detailed=False)
+ return self.to_xml_string(node, True)
+
+ def detail(self, servers_dict):
+ xml_doc = minidom.Document()
+ node = self._server_list_to_xml(xml_doc,
+ servers_dict['servers'],
+ detailed=True)
+ return self.to_xml_string(node, True)
+
+ def show(self, server_dict):
+ xml_doc = minidom.Document()
+ node = self._server_to_xml_detailed(xml_doc,
+ server_dict['server'])
+ return self.to_xml_string(node, True)
+
+ def create(self, server_dict):
+ xml_doc = minidom.Document()
+ node = self._server_to_xml_detailed(xml_doc,
+ server_dict['server'])
+ node.setAttribute('adminPass', server_dict['server']['adminPass'])
+ return self.to_xml_string(node, True)
+
+
def create_resource(version='1.0'):
controller = {
'1.0': ControllerV10,
@@ -628,9 +882,13 @@ def create_resource(version='1.0'):
headers_serializer = HeadersSerializer()
+ xml_serializer = {
+ '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10),
+ '1.1': ServerXMLSerializer(),
+ }[version]
+
body_serializers = {
- 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
- xmlns=xmlns),
+ 'application/xml': xml_serializer,
}
body_deserializers = {
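As a usage sketch only (not part of this patch), the new v1.1 server serializer can be driven directly, the same way create_resource() wires it up. The fake server dict below is an assumption that simply mirrors the fields _server_to_xml_detailed reads; the import path follows this file (nova/api/openstack/servers.py).

# Sketch: serializing one fake server with the v1.1 XML serializer.
from nova.api.openstack.servers import ServerXMLSerializer

fake_server = {
    'id': 1, 'uuid': 'fake-uuid', 'hostId': 'fake-host', 'name': 'test',
    'created': '2011-08-03T16:39:48Z', 'updated': '2011-08-03T16:39:48Z',
    'status': 'ACTIVE', 'progress': 100,
    'links': [{'rel': 'self', 'href': 'http://localhost/v1.1/servers/1'}],
    'metadata': {'key': 'value'},
    'addresses': {},
}
serializer = ServerXMLSerializer()
print serializer.show({'server': fake_server})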
diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py
index df7a94b7e..3ef72b7f6 100644
--- a/nova/api/openstack/versions.py
+++ b/nova/api/openstack/versions.py
@@ -24,7 +24,66 @@ import nova.api.openstack.views.versions
from nova.api.openstack import wsgi
-ATOM_XMLNS = "http://www.w3.org/2005/Atom"
+VERSIONS = {
+ "v1.0": {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/application.wadl"
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.0+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.0+json"
+ }
+ ],
+ },
+ "v1.1": {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/application.wadl"
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.1+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.1+json"
+ }
+ ],
+ },
+}
class Versions(wsgi.Resource):
@@ -36,16 +95,20 @@ class Versions(wsgi.Resource):
}
}
+ headers_serializer = VersionsHeadersSerializer()
+
body_serializers = {
'application/atom+xml': VersionsAtomSerializer(metadata=metadata),
'application/xml': VersionsXMLSerializer(metadata=metadata),
}
- serializer = wsgi.ResponseSerializer(body_serializers)
+ serializer = wsgi.ResponseSerializer(
+ body_serializers=body_serializers,
+ headers_serializer=headers_serializer)
supported_content_types = ('application/json',
'application/xml',
'application/atom+xml')
- deserializer = wsgi.RequestDeserializer(
+ deserializer = VersionsRequestDeserializer(
supported_content_types=supported_content_types)
wsgi.Resource.__init__(self, None, serializer=serializer,
@@ -53,60 +116,131 @@ class Versions(wsgi.Resource):
def dispatch(self, request, *args):
"""Respond to a request for all OpenStack API versions."""
- version_objs = [
- {
- "id": "v1.1",
- "status": "CURRENT",
- #TODO(wwolf) get correct value for these
- "updated": "2011-07-18T11:30:00Z",
- },
- {
- "id": "v1.0",
- "status": "DEPRECATED",
- #TODO(wwolf) get correct value for these
- "updated": "2010-10-09T11:30:00Z",
- },
- ]
-
builder = nova.api.openstack.views.versions.get_view_builder(request)
- versions = [builder.build(version) for version in version_objs]
- return dict(versions=versions)
+ if request.path == '/':
+ # List Versions
+ return builder.build_versions(VERSIONS)
+ else:
+ # Versions Multiple Choice
+ return builder.build_choices(VERSIONS, request)
+
+
+class VersionV10(object):
+ def show(self, req):
+ builder = nova.api.openstack.views.versions.get_view_builder(req)
+ return builder.build_version(VERSIONS['v1.0'])
+
+
+class VersionV11(object):
+ def show(self, req):
+ builder = nova.api.openstack.views.versions.get_view_builder(req)
+ return builder.build_version(VERSIONS['v1.1'])
+
+
+class VersionsRequestDeserializer(wsgi.RequestDeserializer):
+ def get_expected_content_type(self, request):
+ supported_content_types = list(self.supported_content_types)
+ if request.path != '/':
+ # Remove atom+xml accept type for 300 responses
+ if 'application/atom+xml' in supported_content_types:
+ supported_content_types.remove('application/atom+xml')
+
+ return request.best_match_content_type(supported_content_types)
+
+ def get_action_args(self, request_environment):
+ """Parse dictionary created by routes library."""
+ args = {}
+ if request_environment['PATH_INFO'] == '/':
+ args['action'] = 'index'
+ else:
+ args['action'] = 'multi'
+
+ return args
class VersionsXMLSerializer(wsgi.XMLDictSerializer):
- def _versions_to_xml(self, versions):
- root = self._xml_doc.createElement('versions')
+ #TODO(wwolf): this is temporary until we get rid of toprettyxml
+ # in the base class (XMLDictSerializer), which I plan to do in
+ # another branch
+ def to_xml_string(self, node, has_atom=False):
+ self._add_xmlns(node, has_atom)
+ return node.toxml(encoding='UTF-8')
+
+ def _versions_to_xml(self, versions, name="versions", xmlns=None):
+ root = self._xml_doc.createElement(name)
+ root.setAttribute("xmlns", xmlns or wsgi.XMLNS_V11)
+ root.setAttribute("xmlns:atom", wsgi.XMLNS_ATOM)
for version in versions:
root.appendChild(self._create_version_node(version))
return root
- def _create_version_node(self, version):
+ def _create_media_types(self, media_types):
+ base = self._xml_doc.createElement('media-types')
+ for type in media_types:
+ node = self._xml_doc.createElement('media-type')
+ node.setAttribute('base', type['base'])
+ node.setAttribute('type', type['type'])
+ base.appendChild(node)
+
+ return base
+
+ def _create_version_node(self, version, create_ns=False):
version_node = self._xml_doc.createElement('version')
+ if create_ns:
+ xmlns = wsgi.XMLNS_V11
+ xmlns_atom = wsgi.XMLNS_ATOM
+ version_node.setAttribute('xmlns', xmlns)
+ version_node.setAttribute('xmlns:atom', xmlns_atom)
+
version_node.setAttribute('id', version['id'])
version_node.setAttribute('status', version['status'])
- version_node.setAttribute('updated', version['updated'])
+ if 'updated' in version:
+ version_node.setAttribute('updated', version['updated'])
+
+ if 'media-types' in version:
+ media_types = self._create_media_types(version['media-types'])
+ version_node.appendChild(media_types)
- for link in version['links']:
- link_node = self._xml_doc.createElement('atom:link')
- link_node.setAttribute('rel', link['rel'])
- link_node.setAttribute('href', link['href'])
- version_node.appendChild(link_node)
+ link_nodes = self._create_link_nodes(self._xml_doc, version['links'])
+ for link in link_nodes:
+ version_node.appendChild(link)
return version_node
- def default(self, data):
+ def index(self, data):
self._xml_doc = minidom.Document()
node = self._versions_to_xml(data['versions'])
return self.to_xml_string(node)
+ def show(self, data):
+ self._xml_doc = minidom.Document()
+ node = self._create_version_node(data['version'], True)
+
+ return self.to_xml_string(node)
+
+ def multi(self, data):
+ self._xml_doc = minidom.Document()
+ node = self._versions_to_xml(data['choices'], 'choices',
+ xmlns=wsgi.XMLNS_V11)
+
+ return self.to_xml_string(node)
+
class VersionsAtomSerializer(wsgi.XMLDictSerializer):
+ #TODO(wwolf): this is temporary until we get rid of toprettyxml
+ # in the base class (XMLDictSerializer), which I plan to do in
+ # another branch
+ def to_xml_string(self, node, has_atom=False):
+ self._add_xmlns(node, has_atom)
+ return node.toxml(encoding='UTF-8')
+
def __init__(self, metadata=None, xmlns=None):
+ self.metadata = metadata or {}
if not xmlns:
- self.xmlns = ATOM_XMLNS
+ self.xmlns = wsgi.XMLNS_ATOM
else:
self.xmlns = xmlns
@@ -135,8 +269,33 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer):
link_href = link_href.rstrip('/')
return link_href.rsplit('/', 1)[0] + '/'
- def _create_meta(self, root, versions):
- title = self._create_text_elem('title', 'Available API Versions',
+ def _create_detail_meta(self, root, version):
+ title = self._create_text_elem('title', "About This Version",
+ type='text')
+
+ updated = self._create_text_elem('updated', version['updated'])
+
+ uri = version['links'][0]['href']
+ id = self._create_text_elem('id', uri)
+
+ link = self._xml_doc.createElement('link')
+ link.setAttribute('rel', 'self')
+ link.setAttribute('href', uri)
+
+ author = self._xml_doc.createElement('author')
+ author_name = self._create_text_elem('name', 'Rackspace')
+ author_uri = self._create_text_elem('uri', 'http://www.rackspace.com/')
+ author.appendChild(author_name)
+ author.appendChild(author_uri)
+
+ root.appendChild(title)
+ root.appendChild(updated)
+ root.appendChild(id)
+ root.appendChild(author)
+ root.appendChild(link)
+
+ def _create_list_meta(self, root, versions):
+ title = self._create_text_elem('title', "Available API Versions",
type='text')
# Set this updated to the most recently updated version
recent = self._get_most_recent_update(versions)
@@ -144,6 +303,7 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer):
base_url = self._get_base_url(versions[0]['links'][0]['href'])
id = self._create_text_elem('id', base_url)
+
link = self._xml_doc.createElement('link')
link.setAttribute('rel', 'self')
link.setAttribute('href', base_url)
@@ -178,7 +338,10 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer):
link_node = self._xml_doc.createElement('link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
- entry.appendChild(link_node)
+ if 'type' in link:
+ link_node.setAttribute('type', link['type'])
+
+ entry.appendChild(link_node)
content = self._create_text_elem('content',
'Version %s %s (%s)' %
@@ -190,10 +353,45 @@ class VersionsAtomSerializer(wsgi.XMLDictSerializer):
entry.appendChild(content)
root.appendChild(entry)
- def default(self, data):
+ def index(self, data):
self._xml_doc = minidom.Document()
node = self._xml_doc.createElementNS(self.xmlns, 'feed')
- self._create_meta(node, data['versions'])
+ self._create_list_meta(node, data['versions'])
self._create_version_entries(node, data['versions'])
return self.to_xml_string(node)
+
+ def show(self, data):
+ self._xml_doc = minidom.Document()
+ node = self._xml_doc.createElementNS(self.xmlns, 'feed')
+ self._create_detail_meta(node, data['version'])
+ self._create_version_entries(node, [data['version']])
+
+ return self.to_xml_string(node)
+
+
+class VersionsHeadersSerializer(wsgi.ResponseHeadersSerializer):
+ def multi(self, response, data):
+ response.status_int = 300
+
+
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': VersionV10,
+ '1.1': VersionV11,
+ }[version]()
+
+ body_serializers = {
+ 'application/xml': VersionsXMLSerializer(),
+ 'application/atom+xml': VersionsAtomSerializer(),
+ }
+ serializer = wsgi.ResponseSerializer(body_serializers)
+
+ supported_content_types = ('application/json',
+ 'application/xml',
+ 'application/atom+xml')
+ deserializer = wsgi.RequestDeserializer(
+ supported_content_types=supported_content_types)
+
+ return wsgi.Resource(controller, serializer=serializer,
+ deserializer=deserializer)
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index be25e1e40..2873a8e0f 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import hashlib
import os
@@ -149,8 +150,8 @@ class ViewBuilderV11(ViewBuilder):
def _build_detail(self, inst):
response = super(ViewBuilderV11, self)._build_detail(inst)
- response['server']['created'] = inst['created_at']
- response['server']['updated'] = inst['updated_at']
+ response['server']['created'] = utils.isotime(inst['created_at'])
+ response['server']['updated'] = utils.isotime(inst['updated_at'])
if 'status' in response['server']:
if response['server']['status'] == "ACTIVE":
response['server']['progress'] = 100
diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py
index 9fa8f49dc..547289034 100644
--- a/nova/api/openstack/views/versions.py
+++ b/nova/api/openstack/views/versions.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import os
@@ -31,16 +32,44 @@ class ViewBuilder(object):
"""
self.base_url = base_url
- def build(self, version_data):
- """Generic method used to generate a version entity."""
- version = {
- "id": version_data["id"],
- "status": version_data["status"],
- "updated": version_data["updated"],
- "links": self._build_links(version_data),
- }
+ def build_choices(self, VERSIONS, req):
+ version_objs = []
+ for version in VERSIONS:
+ version = VERSIONS[version]
+ version_objs.append({
+ "id": version['id'],
+ "status": version['status'],
+ "links": [
+ {
+ "rel": "self",
+ "href": self.generate_href(version['id'], req.path)
+ }
+ ],
+ "media-types": version['media-types']
+ })
- return version
+ return dict(choices=version_objs)
+
+ def build_versions(self, versions):
+ version_objs = []
+ for version in versions:
+ version = versions[version]
+ version_objs.append({
+ "id": version['id'],
+ "status": version['status'],
+ "updated": version['updated'],
+ "links": self._build_links(version),
+ })
+
+ return dict(versions=version_objs)
+
+ def build_version(self, version):
+ retval = copy.deepcopy(version)
+ retval['links'].insert(0, {
+ "rel": "self",
+ "href": self.base_url.rstrip('/') + '/',
+ })
+ return dict(version=retval)
def _build_links(self, version_data):
"""Generate a container of links that refer to the provided version."""
@@ -55,6 +84,11 @@ class ViewBuilder(object):
return links
- def generate_href(self, version_number):
+ def generate_href(self, version_number, path=None):
"""Create an url that refers to a specific version_number."""
- return os.path.join(self.base_url, version_number) + '/'
+ version_number = version_number.strip('/')
+ if path:
+ path = path.strip('/')
+ return os.path.join(self.base_url, version_number, path)
+ else:
+ return os.path.join(self.base_url, version_number) + '/'
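A small sketch (not part of this patch) of what the extended generate_href produces; the base URL http://localhost/ is an arbitrary example.

# Sketch: generate_href with and without the new optional path argument.
from nova.api.openstack.views.versions import ViewBuilder

builder = ViewBuilder('http://localhost/')
print builder.generate_href('v1.1')
# -> http://localhost/v1.1/
print builder.generate_href('v1.1', '/servers/detail')
# -> http://localhost/v1.1/servers/detail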
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index a28443d12..0eb47044e 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -13,6 +13,7 @@ from nova import wsgi
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
+
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger('nova.api.openstack.wsgi')
@@ -165,12 +166,11 @@ class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request"""
- if metadata_node is None:
- return None
metadata = {}
- for meta_node in self.find_children_named(metadata_node, "meta"):
- key = meta_node.getAttribute("key")
- metadata[key] = self.extract_text(meta_node)
+ if metadata_node is not None:
+ for meta_node in self.find_children_named(metadata_node, "meta"):
+ key = meta_node.getAttribute("key")
+ metadata[key] = self.extract_text(meta_node)
return metadata
@@ -387,6 +387,8 @@ class XMLDictSerializer(DictSerializer):
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
+ if 'type' in link:
+ link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
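The extract_metadata change above means a request without a <metadata> element now deserializes to an empty dict rather than None, so callers can iterate the result unconditionally. A minimal sketch, assuming MetadataXMLDeserializer can be constructed with its defaults:

# Sketch: a missing metadata node now yields {} instead of None.
from nova.api.openstack.wsgi import MetadataXMLDeserializer

print MetadataXMLDeserializer().extract_metadata(None)   # {}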
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index a429b7812..c6d81ee04 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -127,7 +127,7 @@ class DbDriver(object):
try:
project = db.project_create(context.get_admin_context(), values)
- except exception.Duplicate:
+ except exception.DBError:
raise exception.ProjectExists(project=name)
for member in members:
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index b6131fb7f..6205cfb56 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -518,6 +518,14 @@ class AuthManager(object):
return drv.get_user_roles(User.safe_id(user),
Project.safe_id(project))
+ def get_active_roles(self, user, project=None):
+ """Get all active roles for context"""
+ if project:
+ roles = FLAGS.allowed_roles + ['projectmanager']
+ else:
+ roles = FLAGS.global_roles
+ return [role for role in roles if self.has_role(user, role, project)]
+
def get_project(self, pid):
"""Get project object by id"""
with self.driver() as drv:
@@ -730,10 +738,6 @@ class AuthManager(object):
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
- @staticmethod
- def get_key_pairs(context):
- return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
-
def get_credentials(self, user, project=None, use_dmz=True):
"""Get credential zip for user in project"""
if not isinstance(user, User):
@@ -785,7 +789,7 @@ class AuthManager(object):
return read_buffer
def get_environment_rc(self, user, project=None, use_dmz=True):
- """Get credential zip for user in project"""
+ """Get environment rc for user in project"""
if not isinstance(user, User):
user = self.get_user(user)
if project is None:
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 7844d31e1..521525205 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -96,8 +96,8 @@ class CloudPipe(object):
def launch_vpn_instance(self, project_id):
LOG.debug(_("Launching VPN for %s") % (project_id))
project = self.manager.get_project(project_id)
- ctxt = context.RequestContext(user=project.project_manager,
- project=project)
+ ctxt = context.RequestContext(user=project.project_manager_id,
+ project=project.id)
key_name = self.setup_key_pair(ctxt)
group_name = self.setup_security_group(ctxt)
@@ -112,11 +112,11 @@ class CloudPipe(object):
security_group=[group_name])
def setup_security_group(self, context):
- group_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix)
- if db.security_group_exists(context, context.project.id, group_name):
+ group_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
+ if db.security_group_exists(context, context.project_id, group_name):
return group_name
- group = {'user_id': context.user.id,
- 'project_id': context.project.id,
+ group = {'user_id': context.user_id,
+ 'project_id': context.project_id,
'name': group_name,
'description': 'Group for vpn'}
group_ref = db.security_group_create(context, group)
@@ -137,12 +137,12 @@ class CloudPipe(object):
return group_name
def setup_key_pair(self, context):
- key_name = '%s%s' % (context.project.id, FLAGS.vpn_key_suffix)
+ key_name = '%s%s' % (context.project_id, FLAGS.vpn_key_suffix)
try:
- result = cloud._gen_key(context, context.user.id, key_name)
+ result = cloud._gen_key(context, context.user_id, key_name)
private_key = result['private_key']
try:
- key_dir = os.path.join(FLAGS.keys_path, context.user.id)
+ key_dir = os.path.join(FLAGS.keys_path, context.user_id)
if not os.path.exists(key_dir):
os.makedirs(key_dir)
key_path = os.path.join(key_dir, '%s.pem' % key_name)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index d1e5647d2..aae16d1da 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -689,7 +689,7 @@ class API(base.Base):
raise
instances = None
elif project_id or not context.is_admin:
- if not context.project:
+ if not context.project_id:
instances = self.db.instance_get_all_by_user(
context, context.user_id)
else:
@@ -940,18 +940,15 @@ class API(base.Base):
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s") % locals())
if not new_instance_type:
- raise exception.ApiError(_("Requested flavor %(flavor_id)d "
- "does not exist") % locals())
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
current_memory_mb = current_instance_type['memory_mb']
new_memory_mb = new_instance_type['memory_mb']
if current_memory_mb > new_memory_mb:
- raise exception.ApiError(_("Invalid flavor: cannot downsize"
- "instances"))
+ raise exception.CannotResizeToSmallerSize()
if (current_memory_mb == new_memory_mb) and flavor_id:
- raise exception.ApiError(_("Invalid flavor: cannot use"
- "the same flavor. "))
+ raise exception.CannotResizeToSameSize()
instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index c13a629a9..824416514 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -132,11 +132,8 @@ def get_instance_type_by_name(name):
# flavors.
def get_instance_type_by_flavor_id(flavor_id):
"""Retrieve instance type by flavor_id."""
- if flavor_id is None:
- return get_default_instance_type()
+ ctxt = context.get_admin_context()
try:
- ctxt = context.get_admin_context()
return db.instance_type_get_by_flavor_id(ctxt, flavor_id)
- except exception.DBError, e:
- LOG.exception(_('DB error: %s') % e)
- raise exception.ApiError(_("Unknown flavor: %s") % flavor_id)
+ except ValueError:
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 173469bc3..cf4ee229d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -326,7 +326,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id, power_state.BUILDING)
try:
- self.driver.spawn(instance, network_info, bd_mapping)
+ self.driver.spawn(context, instance, network_info, bd_mapping)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
@@ -433,7 +433,7 @@ class ComputeManager(manager.SchedulerDependentManager):
network_info = self.network_api.get_instance_nw_info(context,
instance_ref)
bd_mapping = self._setup_block_device_mapping(context, instance_id)
- self.driver.spawn(instance_ref, network_info, bd_mapping)
+ self.driver.spawn(context, instance_ref, network_info, bd_mapping)
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
@@ -501,7 +501,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals())
- self.driver.snapshot(instance_ref, image_id)
+ self.driver.snapshot(context, instance_ref, image_id)
if image_type == 'snapshot':
if rotation:
@@ -660,7 +660,7 @@ class ComputeManager(manager.SchedulerDependentManager):
_update_state = lambda result: self._update_state_callback(
self, context, instance_id, result)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.rescue(instance_ref, _update_state, network_info)
+ self.driver.rescue(context, instance_ref, _update_state, network_info)
self._update_state(context, instance_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -747,7 +747,7 @@ class ComputeManager(manager.SchedulerDependentManager):
local_gb=instance_type['local_gb'],
instance_type_id=instance_type['id']))
- self.driver.revert_resize(instance_ref)
+ self.driver.revert_migration(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
usage_info = utils.usage_from_instance(instance_ref)
@@ -845,20 +845,26 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
migration_ref = self.db.migration_get(context, migration_id)
+
+ resize_instance = False
instance_ref = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
- instance_type = self.db.instance_type_get_by_flavor_id(context,
- migration_ref['new_flavor_id'])
- self.db.instance_update(context, instance_ref.uuid,
- dict(instance_type_id=instance_type['id'],
- memory_mb=instance_type['memory_mb'],
- vcpus=instance_type['vcpus'],
- local_gb=instance_type['local_gb']))
+ if migration_ref['old_flavor_id'] != migration_ref['new_flavor_id']:
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ migration_ref['new_flavor_id'])
+ self.db.instance_update(context, instance_ref.uuid,
+ dict(instance_type_id=instance_type['id'],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb']))
+ resize_instance = True
instance_ref = self.db.instance_get_by_uuid(context,
instance_ref.uuid)
+
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.finish_resize(instance_ref, disk_info, network_info)
+ self.driver.finish_migration(context, instance_ref, disk_info,
+ network_info, resize_instance)
self.db.migration_update(context, migration_id,
{'status': 'finished', })
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
deleted file mode 100644
index 9d8e2a25d..000000000
--- a/nova/compute/monitor.py
+++ /dev/null
@@ -1,435 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Instance Monitoring:
-
- Optionally may be run on each compute node. Provides RRD
- based statistics and graphs and makes them internally available
- in the object store.
-"""
-
-import datetime
-import os
-import time
-
-import boto
-import boto.s3
-import rrdtool
-from twisted.internet import task
-from twisted.application import service
-
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.virt import connection as virt_connection
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_integer('monitoring_instances_delay', 5,
- 'Sleep time between updates')
-flags.DEFINE_integer('monitoring_instances_step', 300,
- 'Interval of RRD updates')
-flags.DEFINE_string('monitoring_rrd_path', '$state_path/monitor/instances',
- 'Location of RRD files')
-
-
-RRD_VALUES = {
- 'cpu': [
- 'DS:cpu:GAUGE:600:0:100',
- 'RRA:AVERAGE:0.5:1:800',
- 'RRA:AVERAGE:0.5:6:800',
- 'RRA:AVERAGE:0.5:24:800',
- 'RRA:AVERAGE:0.5:288:800',
- 'RRA:MAX:0.5:1:800',
- 'RRA:MAX:0.5:6:800',
- 'RRA:MAX:0.5:24:800',
- 'RRA:MAX:0.5:288:800',
- ],
- 'net': [
- 'DS:rx:COUNTER:600:0:1250000',
- 'DS:tx:COUNTER:600:0:1250000',
- 'RRA:AVERAGE:0.5:1:800',
- 'RRA:AVERAGE:0.5:6:800',
- 'RRA:AVERAGE:0.5:24:800',
- 'RRA:AVERAGE:0.5:288:800',
- 'RRA:MAX:0.5:1:800',
- 'RRA:MAX:0.5:6:800',
- 'RRA:MAX:0.5:24:800',
- 'RRA:MAX:0.5:288:800',
- ],
- 'disk': [
- 'DS:rd:COUNTER:600:U:U',
- 'DS:wr:COUNTER:600:U:U',
- 'RRA:AVERAGE:0.5:1:800',
- 'RRA:AVERAGE:0.5:6:800',
- 'RRA:AVERAGE:0.5:24:800',
- 'RRA:AVERAGE:0.5:288:800',
- 'RRA:MAX:0.5:1:800',
- 'RRA:MAX:0.5:6:800',
- 'RRA:MAX:0.5:24:800',
- 'RRA:MAX:0.5:444:800',
- ]}
-
-
-utcnow = utils.utcnow
-
-
-LOG = logging.getLogger('nova.compute.monitor')
-
-
-def update_rrd(instance, name, data):
- """
- Updates the specified RRD file.
- """
- filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name)
-
- if not os.path.exists(filename):
- init_rrd(instance, name)
-
- timestamp = int(time.mktime(utcnow().timetuple()))
- rrdtool.update(filename, '%d:%s' % (timestamp, data))
-
-
-def init_rrd(instance, name):
- """
- Initializes the specified RRD file.
- """
- path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id)
-
- if not os.path.exists(path):
- os.makedirs(path)
-
- filename = os.path.join(path, '%s.rrd' % name)
-
- if not os.path.exists(filename):
- rrdtool.create(
- filename,
- '--step', '%d' % FLAGS.monitoring_instances_step,
- '--start', '0',
- *RRD_VALUES[name])
-
-
-def graph_cpu(instance, duration):
- """
- Creates a graph of cpu usage for the specified instance and duration.
- """
- path = instance.get_rrd_path()
- filename = os.path.join(path, 'cpu-%s.png' % duration)
-
- rrdtool.graph(
- filename,
- '--disable-rrdtool-tag',
- '--imgformat', 'PNG',
- '--width', '400',
- '--height', '120',
- '--start', 'now-%s' % duration,
- '--vertical-label', '% cpu used',
- '-l', '0',
- '-u', '100',
- 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'),
- 'AREA:cpu#eacc00:% CPU',)
-
- store_graph(instance.instance_id, filename)
-
-
-def graph_net(instance, duration):
- """
- Creates a graph of network usage for the specified instance and duration.
- """
- path = instance.get_rrd_path()
- filename = os.path.join(path, 'net-%s.png' % duration)
-
- rrdtool.graph(
- filename,
- '--disable-rrdtool-tag',
- '--imgformat', 'PNG',
- '--width', '400',
- '--height', '120',
- '--start', 'now-%s' % duration,
- '--vertical-label', 'bytes/s',
- '--logarithmic',
- '--units', 'si',
- '--lower-limit', '1000',
- '--rigid',
- 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'),
- 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'),
- 'AREA:rx#00FF00:In traffic',
- 'LINE1:tx#0000FF:Out traffic',)
-
- store_graph(instance.instance_id, filename)
-
-
-def graph_disk(instance, duration):
- """
- Creates a graph of disk usage for the specified duration.
- """
- path = instance.get_rrd_path()
- filename = os.path.join(path, 'disk-%s.png' % duration)
-
- rrdtool.graph(
- filename,
- '--disable-rrdtool-tag',
- '--imgformat', 'PNG',
- '--width', '400',
- '--height', '120',
- '--start', 'now-%s' % duration,
- '--vertical-label', 'bytes/s',
- '--logarithmic',
- '--units', 'si',
- '--lower-limit', '1000',
- '--rigid',
- 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'),
- 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'),
- 'AREA:rd#00FF00:Read',
- 'LINE1:wr#0000FF:Write',)
-
- store_graph(instance.instance_id, filename)
-
-
-def store_graph(instance_id, filename):
- """
- Transmits the specified graph file to internal object store on cloud
- controller.
- """
- # TODO(devcamcar): Need to use an asynchronous method to make this
- # connection. If boto has some separate method that generates
- # the request it would like to make and another method to parse
- # the response we can make our own client that does the actual
- # request and hands it off to the response parser.
- s3 = boto.s3.connection.S3Connection(
- aws_access_key_id=FLAGS.aws_access_key_id,
- aws_secret_access_key=FLAGS.aws_secret_access_key,
- is_secure=False,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- port=FLAGS.s3_port,
- host=FLAGS.s3_host)
- bucket_name = '_%s.monitor' % instance_id
-
- # Object store isn't creating the bucket like it should currently
- # when it is first requested, so have to catch and create manually.
- try:
- bucket = s3.get_bucket(bucket_name)
- except Exception:
- bucket = s3.create_bucket(bucket_name)
-
- key = boto.s3.Key(bucket)
- key.key = os.path.basename(filename)
- key.set_contents_from_filename(filename)
-
-
-class Instance(object):
- def __init__(self, conn, instance_id):
- self.conn = conn
- self.instance_id = instance_id
- self.last_updated = datetime.datetime.min
- self.cputime = 0
- self.cputime_last_updated = None
-
- init_rrd(self, 'cpu')
- init_rrd(self, 'net')
- init_rrd(self, 'disk')
-
- def needs_update(self):
- """
- Indicates whether this instance is due to have its statistics updated.
- """
- delta = utcnow() - self.last_updated
- return delta.seconds >= FLAGS.monitoring_instances_step
-
- def update(self):
- """
- Updates the instances statistics and stores the resulting graphs
- in the internal object store on the cloud controller.
- """
- LOG.debug(_('updating %s...'), self.instance_id)
-
- try:
- data = self.fetch_cpu_stats()
- if data is not None:
- LOG.debug('CPU: %s', data)
- update_rrd(self, 'cpu', data)
-
- data = self.fetch_net_stats()
- LOG.debug('NET: %s', data)
- update_rrd(self, 'net', data)
-
- data = self.fetch_disk_stats()
- LOG.debug('DISK: %s', data)
- update_rrd(self, 'disk', data)
-
- # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
- # and make the methods @defer.inlineCallbacks.
- graph_cpu(self, '1d')
- graph_cpu(self, '1w')
- graph_cpu(self, '1m')
-
- graph_net(self, '1d')
- graph_net(self, '1w')
- graph_net(self, '1m')
-
- graph_disk(self, '1d')
- graph_disk(self, '1w')
- graph_disk(self, '1m')
- except Exception:
- LOG.exception(_('unexpected error during update'))
-
- self.last_updated = utcnow()
-
- def get_rrd_path(self):
- """
- Returns the path to where RRD files are stored.
- """
- return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id)
-
- def fetch_cpu_stats(self):
- """
- Returns cpu usage statistics for this instance.
- """
- info = self.conn.get_info(self.instance_id)
-
- # Get the previous values.
- cputime_last = self.cputime
- cputime_last_updated = self.cputime_last_updated
-
- # Get the raw CPU time used in nanoseconds.
- self.cputime = float(info['cpu_time'])
- self.cputime_last_updated = utcnow()
-
- LOG.debug('CPU: %d', self.cputime)
-
- # Skip calculation on first pass. Need delta to get a meaningful value.
- if cputime_last_updated is None:
- return None
-
- # Calculate the number of seconds between samples.
- d = self.cputime_last_updated - cputime_last_updated
- t = d.days * 86400 + d.seconds
-
- LOG.debug('t = %d', t)
-
- # Calculate change over time in number of nanoseconds of CPU time used.
- cputime_delta = self.cputime - cputime_last
-
- LOG.debug('cputime_delta = %s', cputime_delta)
-
- # Get the number of virtual cpus in this domain.
- vcpus = int(info['num_cpu'])
-
- LOG.debug('vcpus = %d', vcpus)
-
- # Calculate CPU % used and cap at 100.
- return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
-
- def fetch_disk_stats(self):
- """
- Returns disk usage statistics for this instance.
- """
- rd = 0
- wr = 0
-
- disks = self.conn.get_disks(self.instance_id)
-
- # Aggregate the read and write totals.
- for disk in disks:
- try:
- rd_req, rd_bytes, wr_req, wr_bytes, errs = \
- self.conn.block_stats(self.instance_id, disk)
- rd += rd_bytes
- wr += wr_bytes
- except TypeError:
- iid = self.instance_id
- LOG.error(_('Cannot get blockstats for "%(disk)s"'
- ' on "%(iid)s"') % locals())
- raise
-
- return '%d:%d' % (rd, wr)
-
- def fetch_net_stats(self):
- """
- Returns network usage statistics for this instance.
- """
- rx = 0
- tx = 0
-
- interfaces = self.conn.get_interfaces(self.instance_id)
-
- # Aggregate the in and out totals.
- for interface in interfaces:
- try:
- stats = self.conn.interface_stats(self.instance_id, interface)
- rx += stats[0]
- tx += stats[4]
- except TypeError:
- iid = self.instance_id
- LOG.error(_('Cannot get ifstats for "%(interface)s"'
- ' on "%(iid)s"') % locals())
- raise
-
- return '%d:%d' % (rx, tx)
-
-
-class InstanceMonitor(object, service.Service):
- """
- Monitors the running instances of the current machine.
- """
-
- def __init__(self):
- """
- Initialize the monitoring loop.
- """
- self._instances = {}
- self._loop = task.LoopingCall(self.updateInstances)
-
- def startService(self):
- self._instances = {}
- self._loop.start(interval=FLAGS.monitoring_instances_delay)
- service.Service.startService(self)
-
- def stopService(self):
- self._loop.stop()
- service.Service.stopService(self)
-
- def updateInstances(self):
- """
- Update resource usage for all running instances.
- """
- try:
- conn = virt_connection.get_connection(read_only=True)
- except Exception, exn:
- LOG.exception(_('unexpected exception getting connection'))
- time.sleep(FLAGS.monitoring_instances_delay)
- return
-
- domain_ids = conn.list_instances()
- try:
- self.updateInstances_(conn, domain_ids)
- except Exception, exn:
- LOG.exception('updateInstances_')
-
- def updateInstances_(self, conn, domain_ids):
- for domain_id in domain_ids:
- if not domain_id in self._instances:
- instance = Instance(conn, domain_id)
- self._instances[domain_id] = instance
- LOG.debug(_('Found instance: %s'), domain_id)
-
- for key in self._instances.keys():
- instance = self._instances[key]
- if instance.needs_update():
- instance.update()
diff --git a/nova/context.py b/nova/context.py
index 99085ed75..b917a1d81 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -18,9 +18,8 @@
"""RequestContext: context for requests that persist through all of nova."""
-import random
+import uuid
-from nova import exception
from nova import utils
@@ -31,86 +30,54 @@ class RequestContext(object):
"""
- def __init__(self, user, project, is_admin=None, read_deleted=False,
- remote_address=None, timestamp=None, request_id=None):
- if hasattr(user, 'id'):
- self._user = user
- self.user_id = user.id
- else:
- self._user = None
- self.user_id = user
- if hasattr(project, 'id'):
- self._project = project
- self.project_id = project.id
- else:
- self._project = None
- self.project_id = project
- if is_admin is None:
- if self.user_id and self.user:
- self.is_admin = self.user.is_admin()
- else:
- self.is_admin = False
- else:
- self.is_admin = is_admin
+ def __init__(self, user_id, project_id, is_admin=None, read_deleted=False,
+ roles=None, remote_address=None, timestamp=None,
+ request_id=None, auth_token=None):
+ self.user_id = user_id
+ self.project_id = project_id
+ self.roles = roles or []
+ self.is_admin = is_admin
+ if self.is_admin is None:
+ self.is_admin = 'admin' in self.roles
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = utils.utcnow()
- if isinstance(timestamp, str) or isinstance(timestamp, unicode):
- timestamp = utils.parse_isotime(timestamp)
+ if isinstance(timestamp, basestring):
+ timestamp = utils.parse_strtime(timestamp)
self.timestamp = timestamp
if not request_id:
- chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-'
- request_id = ''.join([random.choice(chars) for x in xrange(20)])
+ request_id = unicode(uuid.uuid4())
self.request_id = request_id
-
- @property
- def user(self):
- # NOTE(vish): Delay import of manager, so that we can import this
- # file from manager.
- from nova.auth import manager
- if not self._user:
- try:
- self._user = manager.AuthManager().get_user(self.user_id)
- except exception.NotFound:
- pass
- return self._user
-
- @property
- def project(self):
- # NOTE(vish): Delay import of manager, so that we can import this
- # file from manager.
- from nova.auth import manager
- if not self._project:
- try:
- auth_manager = manager.AuthManager()
- self._project = auth_manager.get_project(self.project_id)
- except exception.NotFound:
- pass
- return self._project
+ self.auth_token = auth_token
def to_dict(self):
- return {'user': self.user_id,
- 'project': self.project_id,
+ return {'user_id': self.user_id,
+ 'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
+ 'roles': self.roles,
'remote_address': self.remote_address,
- 'timestamp': utils.isotime(self.timestamp),
- 'request_id': self.request_id}
+ 'timestamp': utils.strtime(self.timestamp),
+ 'request_id': self.request_id,
+ 'auth_token': self.auth_token}
@classmethod
def from_dict(cls, values):
return cls(**values)
- def elevated(self, read_deleted=False):
+ def elevated(self, read_deleted=None):
"""Return a version of this context with admin flag set."""
- return RequestContext(self.user_id,
- self.project_id,
- True,
- read_deleted,
- self.remote_address,
- self.timestamp,
- self.request_id)
+ rd = self.read_deleted if read_deleted is None else read_deleted
+ return RequestContext(user_id=self.user_id,
+ project_id=self.project_id,
+ is_admin=True,
+ read_deleted=rd,
+ roles=self.roles,
+ remote_address=self.remote_address,
+ timestamp=self.timestamp,
+ request_id=self.request_id,
+ auth_token=self.auth_token)
def get_admin_context(read_deleted=False):
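As a rough sketch (not part of this patch), the keyword-based RequestContext and its dict round-trip, which is what now travels with RPC messages; the user and project names are placeholders.

# Sketch: build, serialize and restore a context, then elevate it.
from nova import context

ctxt = context.RequestContext(user_id='fake_user', project_id='fake_project',
                              roles=['member'])
restored = context.RequestContext.from_dict(ctxt.to_dict())
assert restored.user_id == 'fake_user'
assert restored.project_id == 'fake_project'
assert ctxt.elevated().is_admin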
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index d7810098a..4f1445217 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -62,7 +62,7 @@ def is_user_context(context):
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
- if not context.project:
+ if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
@@ -71,7 +71,7 @@ def authorize_project_context(context, project_id):
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
- if not context.user:
+ if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
@@ -1312,7 +1312,7 @@ def instance_get_fixed_addresses_v6(context, instance_id):
# combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples
prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs])
# return list containing ipv6 address for each tuple
- return [ipv6.to_global_ipv6(*t) for t in prefix_mac_tuples]
+ return [ipv6.to_global(*t) for t in prefix_mac_tuples]
@require_context
@@ -1680,7 +1680,8 @@ def network_get_by_bridge(context, bridge):
def network_get_by_cidr(context, cidr):
session = get_session()
result = session.query(models.Network).\
- filter_by(cidr=cidr).first()
+ filter(or_(models.Network.cidr == cidr,
+ models.Network.cidr_v6 == cidr)).first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
@@ -3041,13 +3042,18 @@ def instance_type_get_by_name(context, name):
@require_context
def instance_type_get_by_flavor_id(context, id):
"""Returns a dict describing specific flavor_id"""
+ try:
+ flavor_id = int(id)
+ except ValueError:
+ raise exception.FlavorNotFound(flavor_id=id)
+
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
- filter_by(flavorid=int(id)).\
+ filter_by(flavorid=flavor_id).\
first()
if not inst_type:
- raise exception.FlavorNotFound(flavor_id=id)
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
else:
return _dict_with_extra_specs(inst_type)
@@ -3247,8 +3253,8 @@ def agent_build_destroy(context, agent_build_id):
with session.begin():
session.query(models.AgentBuild).\
filter_by(id=agent_build_id).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -3297,7 +3303,7 @@ def instance_type_extra_specs_delete(context, instance_type_id, key):
def instance_type_extra_specs_get_item(context, instance_type_id, key):
session = get_session()
- sppec_result = session.query(models.InstanceTypeExtraSpecs).\
+ spec_result = session.query(models.InstanceTypeExtraSpecs).\
filter_by(instance_type_id=instance_type_id).\
filter_by(key=key).\
filter_by(deleted=False).\
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 7e35c2cba..056259539 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -177,14 +177,6 @@ class Instance(BASE, NovaBase):
user_id = Column(String(255))
project_id = Column(String(255))
- @property
- def user(self):
- return auth.manager.AuthManager().get_user(self.user_id)
-
- @property
- def project(self):
- return auth.manager.AuthManager().get_project(self.project_id)
-
image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
@@ -465,14 +457,6 @@ class SecurityGroup(BASE, NovaBase):
'Instance.deleted == False)',
backref='security_groups')
- @property
- def user(self):
- return auth.manager.AuthManager().get_user(self.user_id)
-
- @property
- def project(self):
- return auth.manager.AuthManager().get_project(self.project_id)
-
class SecurityGroupIngressRule(BASE, NovaBase):
"""Represents a rule in a security group."""
diff --git a/nova/exception.py b/nova/exception.py
index 8c9b45a80..68e6ac937 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -692,3 +692,11 @@ class PasteConfigNotFound(NotFound):
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
+
+
+class CannotResizeToSameSize(NovaException):
+ message = _("When resizing, instances must change size!")
+
+
+class CannotResizeToSmallerSize(NovaException):
+ message = _("Resizing to a smaller size is not supported.")
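A short sketch (not part of this patch) of the typed resize errors, which callers can now catch directly instead of matching the old ApiError text:

# Sketch: the new exceptions carry their message through NovaException.
from nova import exception

try:
    raise exception.CannotResizeToSmallerSize()
except exception.CannotResizeToSmallerSize, e:
    print e   # Resizing to a smaller size is not supported.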
diff --git a/nova/flags.py b/nova/flags.py
index 49355b436..fa6d8860a 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -343,7 +343,7 @@ DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
'Directory for lock files')
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
-
+DEFINE_integer('logfile_mode', 0644, 'Default file mode of the logs.')
DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
DEFINE_string('sql_connection',
'sqlite:///$state_path/$sqlite_db',
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 5c2dc957b..44a3c6f83 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -83,11 +83,16 @@ class GlanceImageService(service.BaseImageService):
client = property(_get_client, _set_client)
+ def _set_client_context(self, context):
+ """Sets the client's auth token."""
+ self.client.set_auth_token(context.auth_token)
+
def index(self, context, filters=None, marker=None, limit=None):
"""Calls out to Glance for a list of images available."""
# NOTE(sirp): We need to use `get_images_detailed` and not
# `get_images` here because we need `is_public` and `properties`
# included so we can filter by user
+ self._set_client_context(context)
filtered = []
filters = filters or {}
if 'is_public' not in filters:
@@ -104,6 +109,7 @@ class GlanceImageService(service.BaseImageService):
def detail(self, context, filters=None, marker=None, limit=None):
"""Calls out to Glance for a list of detailed image information."""
+ self._set_client_context(context)
filtered = []
filters = filters or {}
if 'is_public' not in filters:
@@ -120,6 +126,7 @@ class GlanceImageService(service.BaseImageService):
def show(self, context, image_id):
"""Returns a dict with image data for the given opaque image id."""
+ self._set_client_context(context)
try:
image_meta = self.client.get_image_meta(image_id)
except glance_exception.NotFound:
@@ -143,6 +150,7 @@ class GlanceImageService(service.BaseImageService):
def get(self, context, image_id, data):
"""Calls out to Glance for metadata and data and writes data."""
+ self._set_client_context(context)
try:
image_meta, image_chunks = self.client.get_image(image_id)
except glance_exception.NotFound:
@@ -160,6 +168,7 @@ class GlanceImageService(service.BaseImageService):
:raises: AlreadyExists if the image already exist.
"""
+ self._set_client_context(context)
# Translate Base -> Service
LOG.debug(_('Creating image in Glance. Metadata passed in %s'),
image_meta)
@@ -182,6 +191,7 @@ class GlanceImageService(service.BaseImageService):
:raises: ImageNotFound if the image does not exist.
"""
+ self._set_client_context(context)
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
try:
@@ -198,6 +208,7 @@ class GlanceImageService(service.BaseImageService):
:raises: ImageNotFound if the image does not exist.
"""
+ self._set_client_context(context)
# NOTE(vish): show is to check if image is available
self.show(context, image_id)
try:
diff --git a/nova/image/s3.py b/nova/image/s3.py
index c313c7a13..ccbfa89cd 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -34,7 +34,6 @@ from nova import flags
from nova import image
from nova import log as logging
from nova import utils
-from nova.auth import manager
from nova.image import service
from nova.api.ec2 import ec2utils
@@ -43,6 +42,10 @@ LOG = logging.getLogger("nova.image.s3")
FLAGS = flags.FLAGS
flags.DEFINE_string('image_decryption_dir', '/tmp',
'parent dir for tempdir used for image decryption')
+flags.DEFINE_string('s3_access_key', 'notchecked',
+ 'access key to use for s3 server for images')
+flags.DEFINE_string('s3_secret_key', 'notchecked',
+ 'secret key to use for s3 server for images')
class S3ImageService(service.BaseImageService):
@@ -82,11 +85,10 @@ class S3ImageService(service.BaseImageService):
@staticmethod
def _conn(context):
- # TODO(vish): is there a better way to get creds to sign
- # for the user?
- access = manager.AuthManager().get_access_key(context.user,
- context.project)
- secret = str(context.user.secret)
+ # NOTE(vish): access and secret keys for s3 server are not
+ # checked in nova-objectstore
+ access = FLAGS.s3_access_key
+ secret = FLAGS.s3_secret_key
calling = boto.s3.connection.OrdinaryCallingFormat()
return boto.s3.connection.S3Connection(aws_access_key_id=access,
aws_secret_access_key=secret,
diff --git a/nova/log.py b/nova/log.py
index f8c0ba68d..222b8c5fb 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -43,8 +43,8 @@ from nova import version
FLAGS = flags.FLAGS
flags.DEFINE_string('logging_context_format_string',
'%(asctime)s %(levelname)s %(name)s '
- '[%(request_id)s %(user)s '
- '%(project)s] %(message)s',
+ '[%(request_id)s %(user_id)s '
+ '%(project_id)s] %(message)s',
'format string to use for log messages with context')
flags.DEFINE_string('logging_default_format_string',
'%(asctime)s %(levelname)s %(name)s [-] '
@@ -257,6 +257,7 @@ class NovaRootLogger(NovaLogger):
self.filelog = WatchedFileHandler(logpath)
self.addHandler(self.filelog)
self.logpath = logpath
+ os.chmod(self.logpath, FLAGS.logfile_mode)
else:
self.removeHandler(self.filelog)
self.addHandler(self.streamlog)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 4a3791d8a..8fc6a295f 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -410,8 +410,11 @@ class NetworkManager(manager.SchedulerDependentManager):
kwargs can contain fixed_ips to circumvent another db lookup
"""
instance_id = kwargs.pop('instance_id')
- fixed_ips = kwargs.get('fixed_ips') or \
+ try:
+ fixed_ips = kwargs.get('fixed_ips') or \
self.db.fixed_ip_get_by_instance(context, instance_id)
+ except exception.FixedIpNotFoundForInstance:
+ fixed_ips = []
LOG.debug(_("network deallocation for instance |%s|"), instance_id,
context=context)
# deallocate fixed ips
@@ -551,15 +554,17 @@ class NetworkManager(manager.SchedulerDependentManager):
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
- address = self.db.fixed_ip_associate_pool(context.elevated(),
- network['id'],
- instance_id)
- vif = self.db.virtual_interface_get_by_instance_and_network(context,
- instance_id,
- network['id'])
- values = {'allocated': True,
- 'virtual_interface_id': vif['id']}
- self.db.fixed_ip_update(context, address, values)
+ address = None
+ if network['cidr']:
+ address = self.db.fixed_ip_associate_pool(context.elevated(),
+ network['id'],
+ instance_id)
+ get_vif = self.db.virtual_interface_get_by_instance_and_network
+ vif = get_vif(context, instance_id, network['id'])
+ values = {'allocated': True,
+ 'virtual_interface_id': vif['id']}
+ self.db.fixed_ip_update(context, address, values)
+
self._setup_network(context, network)
return address
@@ -613,34 +618,39 @@ class NetworkManager(manager.SchedulerDependentManager):
network_size, cidr_v6, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None, **kwargs):
"""Create networks based on parameters."""
- fixed_net = netaddr.IPNetwork(cidr)
- if FLAGS.use_ipv6:
+ if cidr_v6:
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
significant_bits_v6 = 64
network_size_v6 = 1 << 64
- for index in range(num_networks):
- start = index * network_size
+ if cidr:
+ fixed_net = netaddr.IPNetwork(cidr)
significant_bits = 32 - int(math.log(network_size, 2))
- cidr = '%s/%s' % (fixed_net[start], significant_bits)
- project_net = netaddr.IPNetwork(cidr)
+
+ for index in range(num_networks):
net = {}
net['bridge'] = bridge
net['bridge_interface'] = bridge_interface
net['dns1'] = dns1
net['dns2'] = dns2
- net['cidr'] = cidr
- net['multi_host'] = multi_host
- net['netmask'] = str(project_net.netmask)
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast)
- net['dhcp_start'] = str(project_net[2])
+
+ if cidr:
+ start = index * network_size
+ project_net = netaddr.IPNetwork('%s/%s' % (fixed_net[start],
+ significant_bits))
+ net['cidr'] = str(project_net)
+ net['multi_host'] = multi_host
+ net['netmask'] = str(project_net.netmask)
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast)
+ net['dhcp_start'] = str(project_net[2])
+
if num_networks > 1:
net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
- if FLAGS.use_ipv6:
+ if cidr_v6:
start_v6 = index * network_size_v6
cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
@@ -673,11 +683,11 @@ class NetworkManager(manager.SchedulerDependentManager):
# None if network with cidr or cidr_v6 already exists
network = self.db.network_create_safe(context, net)
- if network:
+ if not network:
+ raise ValueError(_('Network already exists!'))
+
+ if network and cidr:
self._create_fixed_ips(context, network['id'])
- else:
- raise ValueError(_('Network with cidr %s already exists') %
- cidr)
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index 98969fd3e..e18f3e280 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -80,6 +80,10 @@ def notify(publisher_id, event_type, priority, payload):
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities' % priority))
+
+ # Ensure everything is JSON serializable.
+ payload = utils.to_primitive(payload, convert_instances=True)
+
driver = utils.import_object(FLAGS.notification_driver)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py
new file mode 100644
index 000000000..bdf7f705b
--- /dev/null
+++ b/nova/rpc/__init__.py
@@ -0,0 +1,66 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.utils import import_object
+from nova.rpc.common import RemoteError, LOG
+from nova import flags
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('rpc_backend',
+ 'nova.rpc.amqp',
+ "The messaging module to use, defaults to AMQP.")
+
+RPCIMPL = import_object(FLAGS.rpc_backend)
+
+
+def create_connection(new=True):
+ return RPCIMPL.Connection.instance(new=new)
+
+
+def create_consumer(conn, topic, proxy, fanout=False):
+ if fanout:
+ return RPCIMPL.FanoutAdapterConsumer(
+ connection=conn,
+ topic=topic,
+ proxy=proxy)
+ else:
+ return RPCIMPL.TopicAdapterConsumer(
+ connection=conn,
+ topic=topic,
+ proxy=proxy)
+
+
+def create_consumer_set(conn, consumers):
+ return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers)
+
+
+def call(context, topic, msg):
+ return RPCIMPL.call(context, topic, msg)
+
+
+def cast(context, topic, msg):
+ return RPCIMPL.cast(context, topic, msg)
+
+
+def fanout_cast(context, topic, msg):
+ return RPCIMPL.fanout_cast(context, topic, msg)
+
+
+def multicall(context, topic, msg):
+ return RPCIMPL.multicall(context, topic, msg)
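As a usage sketch only (not part of this patch), callers keep importing nova.rpc and the facade forwards to whatever module --rpc_backend names (nova.rpc.amqp by default). The topic and method below are illustrative placeholders.

# Sketch: casting through the backend-agnostic facade.
from nova import context
from nova import rpc

ctxt = context.get_admin_context()
rpc.cast(ctxt, 'compute.somehost',
         {'method': 'refresh_security_group_rules',
          'args': {'security_group_id': 1}})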
diff --git a/nova/rpc.py b/nova/rpc/amqp.py
index e2771ca88..61555795a 100644
--- a/nova/rpc.py
+++ b/nova/rpc/amqp.py
@@ -44,9 +44,7 @@ from nova import fakerabbit
from nova import flags
from nova import log as logging
from nova import utils
-
-
-LOG = logging.getLogger('nova.rpc')
+from nova.rpc.common import RemoteError, LOG
FLAGS = flags.FLAGS
@@ -418,25 +416,6 @@ def msg_reply(msg_id, reply=None, failure=None):
publisher.close()
-class RemoteError(exception.Error):
- """Signifies that a remote class has raised an exception.
-
- Containes a string representation of the type of the original exception,
- the value of the original exception, and the traceback. These are
- sent to the parent as a joined string so printing the exception
- contains all of the relevent info.
-
- """
-
- def __init__(self, exc_type, value, traceback):
- self.exc_type = exc_type
- self.value = value
- self.traceback = traceback
- super(RemoteError, self).__init__('%s %s\n%s' % (exc_type,
- value,
- traceback))
-
-
def _unpack_context(msg):
"""Unpack context from msg."""
context_dict = {}
diff --git a/nova/rpc/common.py b/nova/rpc/common.py
new file mode 100644
index 000000000..1d3065a83
--- /dev/null
+++ b/nova/rpc/common.py
@@ -0,0 +1,23 @@
+from nova import exception
+from nova import log as logging
+
+LOG = logging.getLogger('nova.rpc')
+
+
+class RemoteError(exception.Error):
+ """Signifies that a remote class has raised an exception.
+
+ Contains a string representation of the type of the original exception,
+ the value of the original exception, and the traceback. These are
+ sent to the parent as a joined string so printing the exception
+ contains all of the relevant info.
+
+ """
+
+ def __init__(self, exc_type, value, traceback):
+ self.exc_type = exc_type
+ self.value = value
+ self.traceback = traceback
+ super(RemoteError, self).__init__('%s %s\n%s' % (exc_type,
+ value,
+ traceback))
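
RemoteError now lives in nova/rpc/common.py so the package facade can re-export it without depending on any particular backend. A hedged sketch of how a caller handles it (the failing call below is faked; only the exception shape mirrors the class above):

    class RemoteError(Exception):
        """Carries the type, value and traceback of a remotely raised error."""
        def __init__(self, exc_type, value, traceback):
            self.exc_type = exc_type
            self.value = value
            self.traceback = traceback
            super(RemoteError, self).__init__(
                '%s %s\n%s' % (exc_type, value, traceback))

    def fake_rpc_call(context, topic, msg):
        # Pretend the remote worker failed and its reply was unpacked here.
        raise RemoteError('InstanceNotFound', 'instance 42', '<remote tb>')

    try:
        fake_rpc_call(None, 'compute', {'method': 'get'})
    except RemoteError as exc:
        # The joined message keeps the remote context in one printable string.
        print('%s: %s' % (exc.exc_type, exc.value))
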
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 6f5eb66fd..8c400d476 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -28,6 +28,7 @@ from nova import flags
from nova import log as logging
from nova.scheduler import zone_aware_scheduler
from nova import utils
+from nova import exception
LOG = logging.getLogger('nova.scheduler.least_cost')
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index c429fdfcc..d99d7214c 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -81,7 +81,7 @@ class ZoneAwareScheduler(driver.Scheduler):
decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
try:
json_entry = decryptor(blob)
- return json.dumps(entry)
+ return json.dumps(json_entry)
except M2Crypto.EVP.EVPError:
pass
return None
diff --git a/nova/service.py b/nova/service.py
index 00e4f61e5..6e9eddc5a 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -149,26 +149,22 @@ class Service(object):
if 'nova-compute' == self.binary:
self.manager.update_available_resource(ctxt)
- self.conn = rpc.Connection.instance(new=True)
+ self.conn = rpc.create_connection(new=True)
logging.debug("Creating Consumer connection for Service %s" %
self.topic)
# Share this same connection for these Consumers
- consumer_all = rpc.TopicAdapterConsumer(
- connection=self.conn,
- topic=self.topic,
- proxy=self)
- consumer_node = rpc.TopicAdapterConsumer(
- connection=self.conn,
- topic='%s.%s' % (self.topic, self.host),
- proxy=self)
- fanout = rpc.FanoutAdapterConsumer(
- connection=self.conn,
- topic=self.topic,
- proxy=self)
- consumer_set = rpc.ConsumerSet(
- connection=self.conn,
- consumer_list=[consumer_all, consumer_node, fanout])
+ consumer_all = rpc.create_consumer(self.conn, self.topic, self,
+ fanout=False)
+
+ node_topic = '%s.%s' % (self.topic, self.host)
+ consumer_node = rpc.create_consumer(self.conn, node_topic, self,
+ fanout=False)
+
+ fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True)
+
+ consumers = [consumer_all, consumer_node, fanout]
+ consumer_set = rpc.create_consumer_set(self.conn, consumers)
# Wait forever, processing these consumers
def _wait():
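
The service hunk above swaps direct TopicAdapterConsumer/FanoutAdapterConsumer construction for the new rpc.create_consumer()/rpc.create_consumer_set() helpers. A standalone sketch of the wiring (the classes here are trivial stand-ins, not Nova's consumers):

    class FakeConsumer(object):
        def __init__(self, connection, topic, proxy, fanout):
            self.topic, self.fanout = topic, fanout

    def create_consumer(conn, topic, proxy, fanout=False):
        return FakeConsumer(conn, topic, proxy, fanout)

    def create_consumer_set(conn, consumers):
        return list(consumers)

    conn, topic, host, service = object(), 'compute', 'node1', object()

    # One consumer on the service topic, one on the per-host topic, and one
    # fanout consumer, all sharing the same connection.
    consumer_all = create_consumer(conn, topic, service, fanout=False)
    consumer_node = create_consumer(conn, '%s.%s' % (topic, host), service,
                                    fanout=False)
    fanout = create_consumer(conn, topic, service, fanout=True)

    consumer_set = create_consumer_set(conn, [consumer_all, consumer_node,
                                              fanout])
    print([(c.topic, c.fanout) for c in consumer_set])
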
diff --git a/nova/test.py b/nova/test.py
index 9790b0aa1..549aa6fcf 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -99,9 +99,7 @@ class TestCase(unittest.TestCase):
self.flag_overrides = {}
self.injected = []
self._services = []
- self._monkey_patch_attach()
self._original_flags = FLAGS.FlagValuesDict()
- rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
def tearDown(self):
"""Runs after each test method to tear down test environment."""
@@ -126,9 +124,6 @@ class TestCase(unittest.TestCase):
# Reset any overriden flags
self.reset_flags()
- # Reset our monkey-patches
- rpc.Consumer.attach_to_eventlet = self.original_attach
-
# Stop any timers
for x in self.injected:
try:
@@ -172,17 +167,6 @@ class TestCase(unittest.TestCase):
self._services.append(svc)
return svc
- def _monkey_patch_attach(self):
- self.original_attach = rpc.Consumer.attach_to_eventlet
-
- def _wrapped(inner_self):
- rv = self.original_attach(inner_self)
- self.injected.append(rv)
- return rv
-
- _wrapped.func_name = self.original_attach.func_name
- rpc.Consumer.attach_to_eventlet = _wrapped
-
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index de006d088..ab7ae2e54 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -74,12 +74,8 @@ class FloatingIpTest(test.TestCase):
def setUp(self):
super(FloatingIpTest, self).setUp()
self.controller = FloatingIPController()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_auth(self.stubs)
self.stubs.Set(network.api.API, "get_floating_ip",
network_api_get_floating_ip)
self.stubs.Set(network.api.API, "list_floating_ips",
@@ -96,7 +92,6 @@ class FloatingIpTest(test.TestCase):
self._create_floating_ip()
def tearDown(self):
- self.stubs.UnsetAll()
self._delete_floating_ip()
super(FloatingIpTest, self).tearDown()
@@ -111,6 +106,11 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(view['floating_ip']['fixed_ip'], None)
self.assertEqual(view['floating_ip']['instance_id'], None)
+ def test_translate_floating_ip_view_dict(self):
+ floating_ip = {'id': 0, 'address': '10.0.0.10', 'fixed_ip': None}
+ view = _translate_floating_ip_view(floating_ip)
+ self.assertTrue('floating_ip' in view)
+
def test_floating_ips_list(self):
req = webob.Request.blank('/v1.1/os-floating-ips')
res = req.get_response(fakes.wsgi_app())
diff --git a/nova/tests/api/openstack/contrib/test_multinic_xs.py b/nova/tests/api/openstack/contrib/test_multinic_xs.py
index b0a9f7676..ac28f6be6 100644
--- a/nova/tests/api/openstack/contrib/test_multinic_xs.py
+++ b/nova/tests/api/openstack/contrib/test_multinic_xs.py
@@ -42,22 +42,14 @@ def compute_api_remove_fixed_ip(self, context, instance_id, address):
class FixedIpTest(test.TestCase):
def setUp(self):
super(FixedIpTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_auth(self.stubs)
self.stubs.Set(compute.api.API, "add_fixed_ip",
compute_api_add_fixed_ip)
self.stubs.Set(compute.api.API, "remove_fixed_ip",
compute_api_remove_fixed_ip)
self.context = context.get_admin_context()
- def tearDown(self):
- self.stubs.UnsetAll()
- super(FixedIpTest, self).tearDown()
-
def test_add_fixed_ip(self):
global last_add_fixed_ip
last_add_fixed_ip = (None, None)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 26b1de818..a67a28a4e 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -29,6 +29,7 @@ from glance.common import exception as glance_exc
from nova import context
from nova import exception as exc
from nova import utils
+from nova import wsgi
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
@@ -40,14 +41,13 @@ import nova.image.fake
from nova.image import glance
from nova.image import service
from nova.tests import fake_flags
-from nova.wsgi import Router
class Context(object):
pass
-class FakeRouter(Router):
+class FakeRouter(wsgi.Router):
def __init__(self):
pass
@@ -68,21 +68,30 @@ def fake_auth_init(self, application):
@webob.dec.wsgify
def fake_wsgi(self, req):
- req.environ['nova.context'] = context.RequestContext(1, 1)
return self.application
-def wsgi_app(inner_app10=None, inner_app11=None):
+def wsgi_app(inner_app10=None, inner_app11=None, fake_auth=True):
if not inner_app10:
inner_app10 = openstack.APIRouterV10()
if not inner_app11:
inner_app11 = openstack.APIRouterV11()
- mapper = urlmap.URLMap()
- api10 = openstack.FaultWrapper(auth.AuthMiddleware(
+
+ if fake_auth:
+ ctxt = context.RequestContext('fake', 'fake')
+ api10 = openstack.FaultWrapper(wsgi.InjectContext(ctxt,
+ limits.RateLimitingMiddleware(inner_app10)))
+ api11 = openstack.FaultWrapper(wsgi.InjectContext(ctxt,
+ limits.RateLimitingMiddleware(
+ extensions.ExtensionMiddleware(inner_app11))))
+ else:
+ api10 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app10)))
- api11 = openstack.FaultWrapper(auth.AuthMiddleware(
+ api11 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(
extensions.ExtensionMiddleware(inner_app11))))
+ Auth = auth
+ mapper = urlmap.URLMap()
mapper['/v1.0'] = api10
mapper['/v1.1'] = api11
mapper['/'] = openstack.FaultWrapper(versions.Versions())
@@ -104,8 +113,7 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True):
def stub_out_image_service(stubs):
def fake_get_image_service(image_href):
- image_id = int(str(image_href).split('/')[-1])
- return (nova.image.fake.FakeImageService(), image_id)
+ return (nova.image.fake.FakeImageService(), image_href)
stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
stubs.Set(nova.image, 'get_default_image_service',
lambda: nova.image.fake.FakeImageService())
@@ -359,17 +367,18 @@ class FakeAuthManager(object):
if admin is not None:
user.admin = admin
- def is_admin(self, user):
+ def is_admin(self, user_id):
+ user = self.get_user(user_id)
return user.admin
- def is_project_member(self, user, project):
+ def is_project_member(self, user_id, project):
if not isinstance(project, Project):
try:
project = self.get_project(project)
except exc.NotFound:
raise webob.exc.HTTPUnauthorized()
- return ((user.id in project.member_ids) or
- (user.id == project.project_manager_id))
+ return ((user_id in project.member_ids) or
+ (user_id == project.project_manager_id))
def create_project(self, name, manager_user, description=None,
member_users=None):
@@ -396,13 +405,13 @@ class FakeAuthManager(object):
else:
raise exc.NotFound
- def get_projects(self, user=None):
- if not user:
+ def get_projects(self, user_id=None):
+ if not user_id:
return FakeAuthManager.projects.values()
else:
return [p for p in FakeAuthManager.projects.values()
- if (user.id in p.member_ids) or
- (user.id == p.project_manager_id)]
+ if (user_id in p.member_ids) or
+ (user_id == p.project_manager_id)]
class FakeRateLimiter(object):
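
The reworked wsgi_app() above lets most tests bypass the auth middleware: with fake_auth=True a wsgi.InjectContext wrapper plants a fixed RequestContext, while auth-specific tests pass fake_auth=False to exercise the real AuthMiddleware. A self-contained sketch of the injection idea (plain WSGI, illustrative names only, not Nova's wsgi module):

    class InjectContext(object):
        """WSGI middleware that plants a fixed request context in environ."""

        def __init__(self, context, app):
            self.context = context
            self.app = app

        def __call__(self, environ, start_response):
            environ['nova.context'] = self.context
            return self.app(environ, start_response)

    def inner_app(environ, start_response):
        # Downstream code reads the pre-built context instead of doing auth.
        ctxt = environ['nova.context']
        body = ('user=%s project=%s' % ctxt).encode('utf-8')
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [body]

    app = InjectContext(('fake', 'fake'), inner_app)

    # Drive the app directly, the way the test helpers do via webob.
    result = app({'REQUEST_METHOD': 'GET'}, lambda status, headers: None)
    print(b''.join(result))  # b'user=fake project=fake'
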
diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py
index 64abcf48c..89dbf5213 100644
--- a/nova/tests/api/openstack/test_accounts.py
+++ b/nova/tests/api/openstack/test_accounts.py
@@ -16,7 +16,6 @@
import json
-import stubout
import webob
from nova import flags
@@ -41,7 +40,7 @@ def fake_admin_check(self, req):
class AccountsTest(test.TestCase):
def setUp(self):
super(AccountsTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
+ self.flags(allow_admin_api=True)
self.stubs.Set(accounts.Controller, '__init__',
fake_init)
self.stubs.Set(accounts.Controller, '_check_admin',
@@ -52,8 +51,6 @@ class AccountsTest(test.TestCase):
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
- self.allow_admin = FLAGS.allow_admin_api
- FLAGS.allow_admin_api = True
fakemgr = fakes.FakeAuthManager()
joeuser = User('id1', 'guy1', 'acc1', 'secret1', False)
superuser = User('id2', 'guy2', 'acc2', 'secret2', True)
@@ -62,11 +59,6 @@ class AccountsTest(test.TestCase):
fakemgr.create_project('test1', joeuser)
fakemgr.create_project('test2', superuser)
- def tearDown(self):
- self.stubs.UnsetAll()
- FLAGS.allow_admin_api = self.allow_admin
- super(AccountsTest, self).tearDown()
-
def test_get_account(self):
req = webob.Request.blank('/v1.0/accounts/test1')
res = req.get_response(fakes.wsgi_app())
diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py
index e87255b18..b83de40cf 100644
--- a/nova/tests/api/openstack/test_adminapi.py
+++ b/nova/tests/api/openstack/test_adminapi.py
@@ -16,14 +16,10 @@
# under the License.
-import stubout
import webob
-from paste import urlmap
from nova import flags
from nova import test
-from nova.api import openstack
-from nova.api.openstack import auth
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
@@ -33,21 +29,12 @@ class AdminAPITest(test.TestCase):
def setUp(self):
super(AdminAPITest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_auth(self.stubs)
self.allow_admin = FLAGS.allow_admin_api
- def tearDown(self):
- self.stubs.UnsetAll()
- FLAGS.allow_admin_api = self.allow_admin
- super(AdminAPITest, self).tearDown()
-
def test_admin_enabled(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
# We should still be able to access public operations.
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
@@ -55,7 +42,7 @@ class AdminAPITest(test.TestCase):
# TODO: Confirm admin operations are available.
def test_admin_disabled(self):
- FLAGS.allow_admin_api = False
+ self.flags(allow_admin_api=False)
# We should still be able to access public operations.
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index af3478c7d..306ae1aa0 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -17,14 +17,12 @@
import datetime
-import stubout
import webob
import webob.dec
import nova.api
import nova.api.openstack.auth
import nova.auth.manager
-from nova import auth
from nova import context
from nova import db
from nova import test
@@ -35,7 +33,6 @@ class Test(test.TestCase):
def setUp(self):
super(Test, self).setUp()
- self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
@@ -45,7 +42,6 @@ class Test(test.TestCase):
fakes.stub_out_networking(self.stubs)
def tearDown(self):
- self.stubs.UnsetAll()
fakes.fake_data_store = {}
super(Test, self).tearDown()
@@ -57,7 +53,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
self.assertEqual(result.headers['X-CDN-Management-Url'],
@@ -73,7 +69,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
self.assertEqual(result.headers['X-Server-Management-Url'],
@@ -86,7 +82,7 @@ class Test(test.TestCase):
self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
@@ -110,7 +106,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-Token'] = 'token_hash'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
self.assertEqual(self.destroy_called, True)
@@ -124,7 +120,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '204 No Content')
token = result.headers['X-Auth-Token']
@@ -132,7 +128,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
req.headers['X-Auth-Project-Id'] = 'user2_project'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
@@ -140,7 +136,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'unknown_user'
req.headers['X-Auth-Key'] = 'unknown_user_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
def test_bad_user_good_key(self):
@@ -151,18 +147,18 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'unknown_user'
req.headers['X-Auth-Key'] = 'user1_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
def test_no_user(self):
req = webob.Request.blank('/v1.0/')
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
def test_bad_token(self):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-Token'] = 'unknown_token'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
def test_bad_project(self):
@@ -177,7 +173,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '204 No Content')
token = result.headers['X-Auth-Token']
@@ -185,7 +181,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
req.headers['X-Auth-Project-Id'] = 'user2_project'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
def test_not_existing_project(self):
@@ -197,7 +193,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '204 No Content')
token = result.headers['X-Auth-Token']
@@ -205,7 +201,7 @@ class Test(test.TestCase):
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
req.headers['X-Auth-Project-Id'] = 'unknown_project'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
@@ -226,20 +222,19 @@ class TestFunctional(test.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-Token'] = 'test_token_hash'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
def test_token_doesnotexist(self):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-Token'] = 'nonexistant_token_hash'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '401 Unauthorized')
class TestLimiter(test.TestCase):
def setUp(self):
super(TestLimiter, self).setUp()
- self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
@@ -248,7 +243,6 @@ class TestLimiter(test.TestCase):
fakes.stub_out_networking(self.stubs)
def tearDown(self):
- self.stubs.UnsetAll()
fakes.fake_data_store = {}
super(TestLimiter, self).tearDown()
@@ -261,7 +255,7 @@ class TestLimiter(test.TestCase):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'user1'
req.headers['X-Auth-Key'] = 'user1_key'
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
token = result.headers['X-Auth-Token']
@@ -269,6 +263,6 @@ class TestLimiter(test.TestCase):
req = webob.Request.blank('/v1.0/fake')
req.method = 'POST'
req.headers['X-Auth-Token'] = token
- result = req.get_response(fakes.wsgi_app())
+ result = req.get_response(fakes.wsgi_app(fake_auth=False))
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index f09270b34..0b76841f0 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -20,6 +20,7 @@ Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
import webob.exc
+import xml.dom.minidom as minidom
from webob import Request
@@ -265,3 +266,203 @@ class MiscFunctionsTest(test.TestCase):
expected = '1.0'
actual = common.get_version_from_href(fixture)
self.assertEqual(actual, expected)
+
+
+class MetadataXMLDeserializationTest(test.TestCase):
+
+ deserializer = common.MetadataXMLDeserializer()
+
+ def test_create(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key='123'>asdf</meta>
+ <meta key='567'>jkl;</meta>
+ </metadata>"""
+ output = self.deserializer.deserialize(request_body, 'create')
+ expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+ self.assertEquals(output, expected)
+
+ def test_create_empty(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+ output = self.deserializer.deserialize(request_body, 'create')
+ expected = {"body": {"metadata": {}}}
+ self.assertEquals(output, expected)
+
+ def test_update_all(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key='123'>asdf</meta>
+ <meta key='567'>jkl;</meta>
+ </metadata>"""
+ output = self.deserializer.deserialize(request_body, 'update_all')
+ expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+ self.assertEquals(output, expected)
+
+ def test_update(self):
+ request_body = """
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key='123'>asdf</meta>"""
+ output = self.deserializer.deserialize(request_body, 'update')
+ expected = {"body": {"meta": {"123": "asdf"}}}
+ self.assertEquals(output, expected)
+
+
+class MetadataXMLSerializationTest(test.TestCase):
+
+ def test_index(self):
+ serializer = common.MetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ four
+ </meta>
+ <meta key="one">
+ two
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_null(self):
+ serializer = common.MetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ None: None,
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="None">
+ None
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_unicode(self):
+ serializer = common.MetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ u'three': u'Jos\xe9',
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString(u"""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ Jos\xe9
+ </meta>
+ </metadata>
+ """.encode("UTF-8").replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show(self):
+ serializer = common.MetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_update_all(self):
+ serializer = common.MetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'key6': 'value6',
+ 'key4': 'value4',
+ },
+ }
+ output = serializer.serialize(fixture, 'update_all')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key6">
+ value6
+ </meta>
+ <meta key="key4">
+ value4
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_update_item(self):
+ serializer = common.MetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture, 'update')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_create(self):
+ serializer = common.MetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'key9': 'value9',
+ 'key2': 'value2',
+ 'key1': 'value1',
+ },
+ }
+ output = serializer.serialize(fixture, 'create')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key2">
+ value2
+ </meta>
+ <meta key="key9">
+ value9
+ </meta>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_delete(self):
+ serializer = common.MetadataXMLSerializer()
+ output = serializer.serialize(None, 'delete')
+ self.assertEqual(output, '')
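
These metadata XML (de)serialization tests move into test_common.py and target common.MetadataXMLSerializer/MetadataXMLDeserializer, apparently so one implementation can serve the metadata controllers. For orientation, the XML shape they assert can be produced with the stdlib alone (a hedged sketch, not the common.MetadataXMLSerializer itself):

    import xml.dom.minidom as minidom

    def serialize_metadata(metadata):
        # Build <metadata><meta key="...">value</meta>...</metadata>.
        doc = minidom.Document()
        root = doc.createElement('metadata')
        root.setAttribute('xmlns',
                          'http://docs.openstack.org/compute/api/v1.1')
        doc.appendChild(root)
        for key, value in metadata.items():
            meta = doc.createElement('meta')
            meta.setAttribute('key', str(key))
            meta.appendChild(doc.createTextNode(str(value)))
            root.appendChild(meta)
        return root.toxml()

    print(serialize_metadata({'one': 'two'}))
    # <metadata xmlns="..."><meta key="one">two</meta></metadata>
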
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index d459c694f..47c37225c 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -17,13 +17,12 @@
import json
import os.path
-import stubout
-import unittest
import webob
from xml.etree import ElementTree
from nova import context
from nova import flags
+from nova import test
from nova.api import openstack
from nova.api.openstack import extensions
from nova.api.openstack import flavors
@@ -80,11 +79,12 @@ class StubExtensionManager(object):
return request_extensions
-class ExtensionControllerTest(unittest.TestCase):
+class ExtensionControllerTest(test.TestCase):
def setUp(self):
- FLAGS.osapi_extensions_path = os.path.join(
- os.path.dirname(__file__), "extensions")
+ super(ExtensionControllerTest, self).setUp()
+ ext_path = os.path.join(os.path.dirname(__file__), "extensions")
+ self.flags(osapi_extensions_path=ext_path)
def test_list_extensions_json(self):
app = openstack.APIRouterV11()
@@ -109,8 +109,8 @@ class ExtensionControllerTest(unittest.TestCase):
'updated': '2011-01-22T13:25:27-06:00',
'description': 'The Fox In Socks Extension',
'alias': 'FOXNSOX',
- 'links': []
- }
+ 'links': [],
+ },
)
def test_get_extension_json(self):
@@ -127,8 +127,8 @@ class ExtensionControllerTest(unittest.TestCase):
"updated": "2011-01-22T13:25:27-06:00",
"description": "The Fox In Socks Extension",
"alias": "FOXNSOX",
- "links": []
- }
+ "links": [],
+ },
)
def test_list_extensions_xml(self):
@@ -176,7 +176,12 @@ class ExtensionControllerTest(unittest.TestCase):
'The Fox In Socks Extension')
-class ResourceExtensionTest(unittest.TestCase):
+class ResourceExtensionTest(test.TestCase):
+
+ def setUp(self):
+ super(ResourceExtensionTest, self).setUp()
+ ext_path = os.path.join(os.path.dirname(__file__), "extensions")
+ self.flags(osapi_extensions_path=ext_path)
def test_no_extension_present(self):
manager = StubExtensionManager(None)
@@ -214,13 +219,14 @@ class InvalidExtension(object):
return "THIRD"
-class ExtensionManagerTest(unittest.TestCase):
+class ExtensionManagerTest(test.TestCase):
response_body = "Try to say this Mr. Knox, sir..."
def setUp(self):
- FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
- "extensions")
+ super(ExtensionManagerTest, self).setUp()
+ ext_path = os.path.join(os.path.dirname(__file__), "extensions")
+ self.flags(osapi_extensions_path=ext_path)
def test_get_resources(self):
app = openstack.APIRouterV11()
@@ -239,11 +245,12 @@ class ExtensionManagerTest(unittest.TestCase):
self.assertTrue('THIRD' not in ext_mgr.extensions)
-class ActionExtensionTest(unittest.TestCase):
+class ActionExtensionTest(test.TestCase):
def setUp(self):
- FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
- "extensions")
+ super(ActionExtensionTest, self).setUp()
+ ext_path = os.path.join(os.path.dirname(__file__), "extensions")
+ self.flags(osapi_extensions_path=ext_path)
def _send_server_action_request(self, url, body):
app = openstack.APIRouterV11()
@@ -277,19 +284,12 @@ class ActionExtensionTest(unittest.TestCase):
self.assertEqual(404, response.status_int)
-class RequestExtensionTest(unittest.TestCase):
+class RequestExtensionTest(test.TestCase):
def setUp(self):
super(RequestExtensionTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
- fakes.stub_out_auth(self.stubs)
- self.context = context.get_admin_context()
-
- def tearDown(self):
- self.stubs.UnsetAll()
- super(RequestExtensionTest, self).tearDown()
+ ext_path = os.path.join(os.path.dirname(__file__), "extensions")
+ self.flags(osapi_extensions_path=ext_path)
def test_get_resources_with_stub_mgr(self):
@@ -327,7 +327,7 @@ class RequestExtensionTest(unittest.TestCase):
self.assertEqual("Pig Bands!", response_data['big_bands'])
-class ExtensionsXMLSerializerTest(unittest.TestCase):
+class ExtensionsXMLSerializerTest(test.TestCase):
def test_serialize_extenstion(self):
serializer = extensions.ExtensionsXMLSerializer()
@@ -342,15 +342,15 @@ class ExtensionsXMLSerializerTest(unittest.TestCase):
{
'rel': 'describedby',
'type': 'application/pdf',
- 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf',
},
{
'rel': 'describedby',
'type': 'application/vnd.sun.wadl+xml',
- 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'
- }
- ]
- }
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl',
+ },
+ ],
+ },
}
xml = serializer.serialize(data, 'show')
@@ -382,14 +382,14 @@ class ExtensionsXMLSerializerTest(unittest.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": "http://foo.com/api/ext/cs-pie.pdf"
+ "href": "http://foo.com/api/ext/cs-pie.pdf",
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": "http://foo.com/api/ext/cs-pie.wadl"
- }
- ]
+ "href": "http://foo.com/api/ext/cs-pie.wadl",
+ },
+ ],
},
{
"name": "Cloud Block Storage",
@@ -401,16 +401,16 @@ class ExtensionsXMLSerializerTest(unittest.TestCase):
{
"rel": "describedby",
"type": "application/pdf",
- "href": "http://foo.com/api/ext/cs-cbs.pdf"
+ "href": "http://foo.com/api/ext/cs-cbs.pdf",
},
{
"rel": "describedby",
"type": "application/vnd.sun.wadl+xml",
- "href": "http://foo.com/api/ext/cs-cbs.wadl"
- }
- ]
- }
- ]
+ "href": "http://foo.com/api/ext/cs-cbs.wadl",
+ },
+ ],
+ },
+ ],
}
xml = serializer.serialize(data, 'index')
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 4ac35b26b..d0fe72001 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -16,7 +16,6 @@
# under the License.
import json
-import stubout
import webob
import xml.dom.minidom as minidom
@@ -56,12 +55,8 @@ def return_instance_type_not_found(context, flavor_id):
class FlavorsTest(test.TestCase):
def setUp(self):
super(FlavorsTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_auth(self.stubs)
self.stubs.Set(nova.db.api, "instance_type_get_all",
return_instance_types)
self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id",
diff --git a/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py b/nova/tests/api/openstack/test_flavors_extra_specs.py
index 2c1c335b0..d386958db 100644
--- a/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py
+++ b/nova/tests/api/openstack/test_flavors_extra_specs.py
@@ -17,14 +17,13 @@
import json
import stubout
-import unittest
import webob
import os.path
from nova import flags
+from nova import test
from nova.api import openstack
-from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.tests.api.openstack import fakes
import nova.wsgi
@@ -40,10 +39,6 @@ def return_flavor_extra_specs(context, flavor_id):
return stub_flavor_extra_specs()
-def return_flavor_extra_specs(context, flavor_id):
- return stub_flavor_extra_specs()
-
-
def return_empty_flavor_extra_specs(context, flavor_id):
return {}
@@ -62,30 +57,17 @@ def stub_flavor_extra_specs():
return specs
-class FlavorsExtraSpecsTest(unittest.TestCase):
+class FlavorsExtraSpecsTest(test.TestCase):
def setUp(self):
super(FlavorsExtraSpecsTest, self).setUp()
- FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
- "extensions")
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
- fakes.FakeAuthDatabase.data = {}
- fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
- self.mware = auth.AuthMiddleware(
- extensions.ExtensionMiddleware(
- openstack.APIRouterV11()))
-
- def tearDown(self):
- self.stubs.UnsetAll()
- super(FlavorsExtraSpecsTest, self).tearDown()
def test_index(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
- request = webob.Request.blank('/flavors/1/os-extra_specs')
- res = request.get_response(self.mware)
+ request = webob.Request.blank('/v1.1/flavors/1/os-extra_specs')
+ res = request.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
@@ -94,8 +76,8 @@ class FlavorsExtraSpecsTest(unittest.TestCase):
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs')
- res = req.get_response(self.mware)
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs')
+ res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
@@ -104,8 +86,8 @@ class FlavorsExtraSpecsTest(unittest.TestCase):
def test_show(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
- res = req.get_response(self.mware)
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key5')
+ res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
@@ -114,28 +96,28 @@ class FlavorsExtraSpecsTest(unittest.TestCase):
def test_show_spec_not_found(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
return_empty_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs/key6')
- res = req.get_response(self.mware)
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key6')
+ res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete',
delete_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key5')
req.method = 'DELETE'
- res = req.get_response(self.mware)
+ res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
def test_create(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs')
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs')
req.method = 'POST'
req.body = '{"extra_specs": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
- res = req.get_response(self.mware)
+ res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
@@ -145,21 +127,21 @@ class FlavorsExtraSpecsTest(unittest.TestCase):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs')
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs')
req.method = 'POST'
req.headers["content-type"] = "application/json"
- res = req.get_response(self.mware)
+ res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
- res = req.get_response(self.mware)
+ res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
self.assertEqual('application/json', res.headers['Content-Type'])
res_dict = json.loads(res.body)
@@ -169,30 +151,30 @@ class FlavorsExtraSpecsTest(unittest.TestCase):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
- res = req.get_response(self.mware)
+ res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/key1')
req.method = 'PUT'
req.body = '{"key1": "value1", "key2": "value2"}'
req.headers["content-type"] = "application/json"
- res = req.get_response(self.mware)
+ res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db.api,
'instance_type_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- req = webob.Request.blank('/flavors/1/os-extra_specs/bad')
+ req = webob.Request.blank('/v1.1/flavors/1/os-extra_specs/bad')
req.method = 'PUT'
req.body = '{"key1": "value1"}'
req.headers["content-type"] = "application/json"
- res = req.get_response(self.mware)
+ res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
index 31ca18497..56a0932e7 100644
--- a/nova/tests/api/openstack/test_image_metadata.py
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -16,10 +16,7 @@
# under the License.
import json
-import stubout
-import unittest
import webob
-import xml.dom.minidom as minidom
from nova import flags
@@ -85,23 +82,13 @@ class ImageMetaDataTest(test.TestCase):
def setUp(self):
super(ImageMetaDataTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- self.orig_image_service = FLAGS.image_service
- FLAGS.image_service = 'nova.image.glance.GlanceImageService'
- fakes.FakeAuthManager.auth_data = {}
- fakes.FakeAuthDatabase.data = {}
- fakes.stub_out_auth(self.stubs)
+ self.flags(image_service='nova.image.glance.GlanceImageService')
# NOTE(dprince) max out properties/metadata in image 3 for testing
img3 = self.IMAGE_FIXTURES[2]
for num in range(FLAGS.quota_metadata_items):
img3['properties']['key%i' % num] = "blah"
fakes.stub_out_glance(self.stubs, self.IMAGE_FIXTURES)
- def tearDown(self):
- self.stubs.UnsetAll()
- FLAGS.image_service = self.orig_image_service
- super(ImageMetaDataTest, self).tearDown()
-
def test_index(self):
req = webob.Request.blank('/v1.1/images/1/metadata')
res = req.get_response(fakes.wsgi_app())
@@ -252,203 +239,3 @@ class ImageMetaDataTest(test.TestCase):
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
-
-
-class ImageMetadataXMLDeserializationTest(test.TestCase):
-
- deserializer = openstack.image_metadata.ImageMetadataXMLDeserializer()
-
- def test_create(self):
- request_body = """
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key='123'>asdf</meta>
- <meta key='567'>jkl;</meta>
- </metadata>"""
- output = self.deserializer.deserialize(request_body, 'create')
- expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
- self.assertEquals(output, expected)
-
- def test_create_empty(self):
- request_body = """
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
- output = self.deserializer.deserialize(request_body, 'create')
- expected = {"body": {"metadata": {}}}
- self.assertEquals(output, expected)
-
- def test_update_all(self):
- request_body = """
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key='123'>asdf</meta>
- <meta key='567'>jkl;</meta>
- </metadata>"""
- output = self.deserializer.deserialize(request_body, 'update_all')
- expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
- self.assertEquals(output, expected)
-
- def test_update(self):
- request_body = """
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1"
- key='123'>asdf</meta>"""
- output = self.deserializer.deserialize(request_body, 'update')
- expected = {"body": {"meta": {"123": "asdf"}}}
- self.assertEquals(output, expected)
-
-
-class ImageMetadataXMLSerializationTest(test.TestCase):
-
- def test_index(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- 'one': 'two',
- 'three': 'four',
- },
- }
- output = serializer.serialize(fixture, 'index')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="three">
- four
- </meta>
- <meta key="one">
- two
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_index_null(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- None: None,
- },
- }
- output = serializer.serialize(fixture, 'index')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="None">
- None
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_index_unicode(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- u'three': u'Jos\xe9',
- },
- }
- output = serializer.serialize(fixture, 'index')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString(u"""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="three">
- Jos\xe9
- </meta>
- </metadata>
- """.encode("UTF-8").replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_show(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.serialize(fixture, 'show')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
- two
- </meta>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_update_all(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- 'key6': 'value6',
- 'key4': 'value4',
- },
- }
- output = serializer.serialize(fixture, 'update_all')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="key6">
- value6
- </meta>
- <meta key="key4">
- value4
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_update_item(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.serialize(fixture, 'update')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
- two
- </meta>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_create(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- 'key9': 'value9',
- 'key2': 'value2',
- 'key1': 'value1',
- },
- }
- output = serializer.serialize(fixture, 'create')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="key2">
- value2
- </meta>
- <meta key="key9">
- value9
- </meta>
- <meta key="key1">
- value1
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_delete(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- output = serializer.serialize(None, 'delete')
- self.assertEqual(output, '')
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 87a695dde..942c0b333 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -155,7 +155,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests):
fakes.stub_out_compute_api_snapshot(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
- self.context = context.RequestContext(1, None)
+ self.context = context.RequestContext('fake', 'fake')
self.service.delete_all()
self.sent_to_glance = {}
fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
@@ -168,7 +168,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests):
"""Ensure instance_id is persisted as an image-property"""
fixture = {'name': 'test image',
'is_public': False,
- 'properties': {'instance_id': '42', 'user_id': '1'}}
+ 'properties': {'instance_id': '42', 'user_id': 'fake'}}
image_id = self.service.create(self.context, fixture)['id']
expected = fixture
@@ -178,7 +178,7 @@ class GlanceImageServiceTest(_BaseImageServiceTests):
expected = {'id': image_id,
'name': 'test image',
'is_public': False,
- 'properties': {'instance_id': '42', 'user_id': '1'}}
+ 'properties': {'instance_id': '42', 'user_id': 'fake'}}
self.assertDictMatch(image_meta, expected)
image_metas = self.service.detail(self.context)
@@ -331,11 +331,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.orig_image_service = FLAGS.image_service
FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
self.fixtures = self._make_image_fixtures()
fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
@@ -352,7 +349,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
"""Determine if this fixture is applicable for given user id."""
is_public = fixture["is_public"]
try:
- uid = int(fixture["properties"]["user_id"])
+ uid = fixture["properties"]["user_id"]
except KeyError:
uid = None
return uid == user_id or is_public
@@ -424,7 +421,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
"metadata": {
"instance_ref": "http://localhost/v1.1/servers/42",
- "user_id": "1",
+ "user_id": "fake",
},
"links": [{
"rel": "self",
@@ -559,7 +556,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
fixtures = copy.copy(self.fixtures)
for image in fixtures:
- if not self._applicable_fixture(image, 1):
+ if not self._applicable_fixture(image, "fake"):
fixtures.remove(image)
continue
@@ -666,7 +663,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
'name': 'queued snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
- u'user_id': u'1',
+ u'user_id': u'fake',
},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
@@ -696,7 +693,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
'name': 'saving snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
- u'user_id': u'1',
+ u'user_id': u'fake',
},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
@@ -727,7 +724,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
'name': 'active snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
- u'user_id': u'1',
+ u'user_id': u'fake',
},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
@@ -757,7 +754,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
'name': 'killed snapshot',
'metadata': {
u'instance_ref': u'http://localhost/v1.1/servers/42',
- u'user_id': u'1',
+ u'user_id': u'fake',
},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
@@ -1045,82 +1042,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
- def test_create_backup_no_name(self):
- """Name is also required for backups"""
- body = dict(image=dict(serverId='123', image_type='backup',
- backup_type='daily', rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_backup_with_rotation_and_backup_type(self):
- """The happy path for creating backups
-
- Creating a backup is an admin-only operation, as opposed to snapshots
- which are available to anybody.
- """
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', image_type='backup',
- name='Backup 1',
- backup_type='daily', rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
-
- def test_create_backup_no_rotation(self):
- """Rotation is required for backup requests"""
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', name='daily',
- image_type='backup', backup_type='daily'))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_backup_no_backup_type(self):
- """Backup Type (daily or weekly) is required for backup requests"""
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', name='daily',
- image_type='backup', rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_with_invalid_image_type(self):
- """Valid image_types are snapshot | daily | weekly"""
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', image_type='monthly',
- rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
def test_create_image_no_server_id(self):
body = dict(image=dict(name='Snapshot 1'))
@@ -1131,113 +1052,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
- def test_create_image_v1_1(self):
-
- body = dict(image=dict(serverRef='123', name='Snapshot 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
-
- def test_create_image_v1_1_actual_server_ref(self):
-
- serverRef = 'http://localhost/v1.1/servers/1'
- serverBookmark = 'http://localhost/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
- result = json.loads(response.body)
- expected = {
- 'id': 1,
- 'links': [
- {
- 'rel': 'self',
- 'href': serverRef,
- },
- {
- 'rel': 'bookmark',
- 'href': serverBookmark,
- },
- ]
- }
- self.assertEqual(result['image']['server'], expected)
-
- def test_create_image_v1_1_actual_server_ref_port(self):
-
- serverRef = 'http://localhost:8774/v1.1/servers/1'
- serverBookmark = 'http://localhost:8774/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
- result = json.loads(response.body)
- expected = {
- 'id': 1,
- 'links': [
- {
- 'rel': 'self',
- 'href': serverRef,
- },
- {
- 'rel': 'bookmark',
- 'href': serverBookmark,
- },
- ]
- }
- self.assertEqual(result['image']['server'], expected)
-
- def test_create_image_v1_1_server_ref_bad_hostname(self):
-
- serverRef = 'http://asdf/v1.1/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_v1_1_no_server_ref(self):
-
- body = dict(image=dict(name='Snapshot 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_v1_1_server_ref_missing_version(self):
-
- serverRef = 'http://localhost/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_v1_1_server_ref_missing_id(self):
-
- serverRef = 'http://localhost/v1.1/servers'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
@classmethod
def _make_image_fixtures(cls):
image_id = 123
@@ -1259,7 +1073,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
# Snapshot for User 1
server_ref = 'http://localhost/v1.1/servers/42'
- snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'}
+ snapshot_properties = {'instance_ref': server_ref, 'user_id': 'fake'}
for status in ('queued', 'saving', 'active', 'killed'):
add_fixture(id=image_id, name='%s snapshot' % status,
is_public=False, status=status,
@@ -1267,7 +1081,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_id += 1
# Snapshot for User 2
- other_snapshot_properties = {'instance_id': '43', 'user_id': '2'}
+ other_snapshot_properties = {'instance_id': '43', 'user_id': 'other'}
add_fixture(id=image_id, name='someone elses snapshot',
is_public=False, status='active',
properties=other_snapshot_properties)
@@ -1716,76 +1530,3 @@ class ImageXMLSerializationTest(test.TestCase):
""".replace(" ", "") % (locals()))
self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_create(self):
- serializer = images.ImageXMLSerializer()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'SAVING',
- 'progress': 80,
- 'server': {
- 'id': 1,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture, 'create')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected_server_href = self.SERVER_HREF
- expected_server_bookmark = self.SERVER_BOOKMARK
- expected_href = self.IMAGE_HREF % 1
- expected_bookmark = self.IMAGE_BOOKMARK % 1
- expected_now = self.TIMESTAMP
- expected = minidom.parseString("""
- <image id="1"
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- xmlns:atom="http://www.w3.org/2005/Atom"
- name="Image1"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="SAVING"
- progress="80">
- <server id="1">
- <atom:link rel="self" href="%(expected_server_href)s"/>
- <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/>
- </server>
- <metadata>
- <meta key="key1">
- value1
- </meta>
- </metadata>
- <atom:link href="%(expected_href)s" rel="self"/>
- <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
- </image>
- """.replace(" ", "") % (locals()))
-
- self.assertEqual(expected.toxml(), actual.toxml())
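The XML assertions in this file, and in the server serializer tests later in this patch, all follow the same recipe: strip the indentation spaces from both documents, parse them with minidom, and compare the canonical toxml() output. A standalone illustration of that comparison, independent of any nova serializer:

    from xml.dom import minidom

    def normalize(xml_text):
        """Strip indentation spaces and return the canonical DOM serialization."""
        return minidom.parseString(xml_text.replace("  ", "")).toxml()

    # Two renderings that differ only in indentation normalize to the same string.
    pretty = '<image id="1">\n  <meta key="k">v</meta>\n</image>'
    compact = '<image id="1">\n<meta key="k">v</meta>\n</image>'
    assert normalize(pretty) == normalize(compact)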
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 8a3fe681a..6c3d531e3 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -920,7 +920,7 @@ class LimitsViewBuilderV11Test(test.TestCase):
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
- "resetTime": 1311272226
+ "resetTime": 1311272226,
},
{
"URI": "*/servers",
@@ -929,7 +929,7 @@ class LimitsViewBuilderV11Test(test.TestCase):
"verb": "POST",
"remaining": 10,
"unit": "DAY",
- "resetTime": 1311272226
+ "resetTime": 1311272226,
},
]
self.absolute_limits = {
@@ -954,7 +954,7 @@ class LimitsViewBuilderV11Test(test.TestCase):
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
- "next-available": "2011-07-21T18:17:06Z"
+ "next-available": "2011-07-21T18:17:06Z",
},
]
},
@@ -967,7 +967,7 @@ class LimitsViewBuilderV11Test(test.TestCase):
"verb": "POST",
"remaining": 10,
"unit": "DAY",
- "next-available": "2011-07-21T18:17:06Z"
+ "next-available": "2011-07-21T18:17:06Z",
},
]
},
@@ -989,7 +989,7 @@ class LimitsViewBuilderV11Test(test.TestCase):
expected_limits = {
"limits": {
"rate": [],
- "absolute": {}
+ "absolute": {},
}
}
@@ -1022,7 +1022,7 @@ class LimitsXMLSerializationTest(test.TestCase):
"verb": "POST",
"remaining": 2,
"unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z"
+ "next-available": "2011-12-15T22:42:45Z",
},
]
},
@@ -1083,7 +1083,7 @@ class LimitsXMLSerializationTest(test.TestCase):
fixture = {
"limits": {
"rate": [],
- "absolute": {}
+ "absolute": {},
}
}
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
index 0431e68d2..f90485067 100644
--- a/nova/tests/api/openstack/test_server_metadata.py
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -16,14 +16,12 @@
# under the License.
import json
-import stubout
-import unittest
import webob
from nova import exception
from nova import flags
-from nova.api import openstack
+from nova import test
from nova.tests.api.openstack import fakes
import nova.wsgi
@@ -76,21 +74,13 @@ def return_server_nonexistant(context, server_id):
raise exception.InstanceNotFound()
-class ServerMetaDataTest(unittest.TestCase):
+class ServerMetaDataTest(test.TestCase):
def setUp(self):
super(ServerMetaDataTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
- fakes.FakeAuthDatabase.data = {}
- fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
self.stubs.Set(nova.db.api, 'instance_get', return_server)
- def tearDown(self):
- self.stubs.UnsetAll()
- super(ServerMetaDataTest, self).tearDown()
-
def test_index(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_server_metadata)
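The setUp/tearDown churn in this file, and in the test_servers.py changes that follow, comes from moving the tests onto nova's test.TestCase, which hands each test a ready-made self.stubs and a self.flags() helper and undoes both automatically. A minimal sketch of that pattern, assuming a stubout-style stub manager and an attribute-style FLAGS object (the names here are illustrative, not nova's actual base class):

    import unittest

    import stubout


    class _Flags(object):
        """Stand-in for a global, attribute-style configuration object."""
        allow_admin_api = False

    FLAGS = _Flags()


    class BaseTestCase(unittest.TestCase):
        """Owns stub and flag cleanup so individual tests don't have to."""

        def setUp(self):
            super(BaseTestCase, self).setUp()
            self.stubs = stubout.StubOutForTesting()
            self._flag_overrides = {}

        def flags(self, **overrides):
            # Remember each flag's original value, then apply the override.
            for name, value in overrides.items():
                self._flag_overrides.setdefault(name, getattr(FLAGS, name))
                setattr(FLAGS, name, value)

        def tearDown(self):
            # Undo stubs and flag overrides so later tests see pristine state.
            self.stubs.UnsetAll()
            for name, value in self._flag_overrides.items():
                setattr(FLAGS, name, value)
            super(BaseTestCase, self).tearDown()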
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 4ca79434f..4d42972c1 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -16,11 +16,11 @@
# under the License.
import base64
+import datetime
import json
import unittest
from xml.dom import minidom
-import stubout
import webob
from nova import context
@@ -103,8 +103,8 @@ def return_server_with_uuid_and_power_state(power_state):
return _return_server
-def return_servers(context, user_id=1):
- return [stub_instance(i, user_id) for i in xrange(5)]
+def return_servers(context, *args, **kwargs):
+ return [stub_instance(i, 'fake', 'fake') for i in xrange(5)]
def return_servers_by_reservation(context, reservation_id=""):
@@ -147,10 +147,10 @@ def instance_addresses(context, instance_id):
return None
-def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
- host=None, power_state=0, reservation_id="",
- uuid=FAKE_UUID, image_ref="10", flavor_id="1",
- interfaces=None):
+def stub_instance(id, user_id='fake', project_id='fake', private_address=None,
+ public_addresses=None, host=None, power_state=0,
+ reservation_id="", uuid=FAKE_UUID, image_ref="10",
+ flavor_id="1", interfaces=None):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -172,11 +172,11 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
instance = {
"id": int(id),
- "created_at": "2010-10-10T12:00:00Z",
- "updated_at": "2010-11-11T11:00:00Z",
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
"user_id": user_id,
- "project_id": "",
+ "project_id": project_id,
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
@@ -235,12 +235,8 @@ class ServersTest(test.TestCase):
def setUp(self):
self.maxDiff = None
super(ServersTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid)
@@ -248,7 +244,7 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
return_server_by_uuid)
- self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_project',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
return_security_group)
@@ -263,15 +259,20 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.compute.API, 'resume', fake_compute_api)
self.stubs.Set(nova.compute.API, "get_diagnostics", fake_compute_api)
self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
+
+ fakes.stub_out_glance(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
+ service_class = 'nova.image.glance.GlanceImageService'
+ self.service = utils.import_object(service_class)
+ self.context = context.RequestContext(1, None)
+ self.service.delete_all()
+ self.sent_to_glance = {}
+ fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
+
self.allow_admin = FLAGS.allow_admin_api
self.webreq = common.webob_factory('/v1.0/servers')
- def tearDown(self):
- self.stubs.UnsetAll()
- FLAGS.allow_admin_api = self.allow_admin
- super(ServersTest, self).tearDown()
-
def test_get_server_by_id(self):
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(fakes.wsgi_app())
@@ -399,6 +400,78 @@ class ServersTest(test.TestCase):
self.assertDictMatch(res_dict, expected_server)
+ def test_get_server_by_id_v1_1_xml(self):
+ image_bookmark = "http://localhost/images/10"
+ flavor_ref = "http://localhost/v1.1/flavors/1"
+ flavor_id = "1"
+ flavor_bookmark = "http://localhost/flavors/1"
+ server_href = "http://localhost/v1.1/servers/1"
+ server_bookmark = "http://localhost/servers/1"
+
+ public_ip = '192.168.0.3'
+ private_ip = '172.19.0.1'
+ interfaces = [
+ {
+ 'network': {'label': 'public'},
+ 'fixed_ips': [
+ {'address': public_ip},
+ ],
+ },
+ {
+ 'network': {'label': 'private'},
+ 'fixed_ips': [
+ {'address': private_ip},
+ ],
+ },
+ ]
+ new_return_server = return_server_with_attributes(
+ interfaces=interfaces)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
+ req = webob.Request.blank('/v1.1/servers/1')
+ req.headers['Accept'] = 'application/xml'
+ res = req.get_response(fakes.wsgi_app())
+ actual = minidom.parseString(res.body.replace(' ', ''))
+ expected_uuid = FAKE_UUID
+ expected_updated = "2010-11-11T11:00:00Z"
+ expected_created = "2010-10-10T12:00:00Z"
+ expected = minidom.parseString("""
+ <server id="1"
+ uuid="%(expected_uuid)s"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ name="server1"
+ updated="%(expected_updated)s"
+ created="%(expected_created)s"
+ hostId=""
+ status="BUILD"
+ progress="0">
+ <atom:link href="%(server_href)s" rel="self"/>
+ <atom:link href="%(server_bookmark)s" rel="bookmark"/>
+ <image id="10">
+ <atom:link rel="bookmark" href="%(image_bookmark)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link rel="bookmark" href="%(flavor_bookmark)s"/>
+ </flavor>
+ <metadata>
+ <meta key="seq">
+ 1
+ </meta>
+ </metadata>
+ <addresses>
+ <network id="public">
+ <ip version="4" addr="%(public_ip)s"/>
+ </network>
+ <network id="private">
+ <ip version="4" addr="%(private_ip)s"/>
+ </network>
+ </addresses>
+ </server>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
def test_get_server_with_active_status_by_id_v1_1(self):
image_bookmark = "http://localhost/images/10"
flavor_ref = "http://localhost/v1.1/flavors/1"
@@ -899,6 +972,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
+ self.assertEqual(len(res_dict['servers']), 5)
i = 0
for s in res_dict['servers']:
self.assertEqual(s['id'], i)
@@ -962,6 +1036,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
+ self.assertEqual(len(res_dict['servers']), 5)
for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], i)
self.assertEqual(s['name'], 'server%d' % i)
@@ -1048,8 +1123,8 @@ class ServersTest(test.TestCase):
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'image_ref': image_ref,
- 'created_at': '2010-10-10T12:00:00Z',
- 'updated_at': '2010-11-11T11:00:00Z',
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
}
def server_update(context, id, params):
@@ -1099,6 +1174,7 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
server = json.loads(res.body)['server']
self.assertEqual(16, len(server['adminPass']))
self.assertEqual('server_test', server['name'])
@@ -1106,7 +1182,6 @@ class ServersTest(test.TestCase):
self.assertEqual(2, server['flavorId'])
self.assertEqual(3, server['imageId'])
self.assertEqual(FAKE_UUID, server['uuid'])
- self.assertEqual(res.status_int, 200)
def test_create_instance(self):
self._test_create_instance_helper()
@@ -1124,7 +1199,7 @@ class ServersTest(test.TestCase):
def test_create_instance_via_zones(self):
"""Server generated ReservationID"""
self._setup_for_create_instance()
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=3, flavorId=2,
@@ -1146,7 +1221,7 @@ class ServersTest(test.TestCase):
def test_create_instance_via_zones_with_resid(self):
"""User supplied ReservationID"""
self._setup_for_create_instance()
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=3, flavorId=2,
@@ -1250,7 +1325,8 @@ class ServersTest(test.TestCase):
def test_create_instance_v1_1(self):
self._setup_for_create_instance()
- image_href = 'http://localhost/images/2'
+ # proper local hrefs must start with 'http://localhost/v1.1/'
+ image_href = 'http://localhost/v1.1/images/2'
flavor_ref = 'http://localhost/flavors/3'
expected_flavor = {
"id": "3",
@@ -1279,7 +1355,12 @@ class ServersTest(test.TestCase):
'hello': 'world',
'open': 'stack',
},
- 'personality': {},
+ 'personality': [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "MQ==",
+ },
+ ],
},
}
@@ -1293,11 +1374,11 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 200)
server = json.loads(res.body)['server']
self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual(1, server['id'])
+ self.assertEqual(0, server['progress'])
self.assertEqual('server_test', server['name'])
self.assertEqual(expected_flavor, server['flavor'])
self.assertEqual(expected_image, server['image'])
- self.assertEqual(res.status_int, 200)
- #self.assertEqual(1, server['id'])
def test_create_instance_v1_1_invalid_flavor_href(self):
self._setup_for_create_instance()
@@ -1351,7 +1432,7 @@ class ServersTest(test.TestCase):
self._setup_for_create_instance()
image_id = "2"
- flavor_ref = 'http://localhost/flavors/3'
+ flavor_ref = 'http://localhost/v1.1/flavors/3'
expected_flavor = {
"id": "3",
"links": [
@@ -1385,10 +1466,10 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
server = json.loads(res.body)['server']
self.assertEqual(expected_flavor, server['flavor'])
self.assertEqual(expected_image, server['image'])
- self.assertEqual(res.status_int, 200)
def test_create_instance_with_admin_pass_v1_0(self):
self._setup_for_create_instance()
@@ -1411,7 +1492,7 @@ class ServersTest(test.TestCase):
self.assertNotEqual(res['server']['adminPass'],
body['server']['adminPass'])
- def test_create_instance_with_admin_pass_v1_1(self):
+ def test_create_instance_v1_1_admin_pass(self):
self._setup_for_create_instance()
image_href = 'http://localhost/v1.1/images/2'
@@ -1419,8 +1500,8 @@ class ServersTest(test.TestCase):
body = {
'server': {
'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
+ 'imageRef': 3,
+ 'flavorRef': 3,
'adminPass': 'testpass',
},
}
@@ -1430,19 +1511,18 @@ class ServersTest(test.TestCase):
req.body = json.dumps(body)
req.headers['content-type'] = "application/json"
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
server = json.loads(res.body)['server']
self.assertEqual(server['adminPass'], body['server']['adminPass'])
- def test_create_instance_with_empty_admin_pass_v1_1(self):
+ def test_create_instance_v1_1_admin_pass_empty(self):
self._setup_for_create_instance()
- image_href = 'http://localhost/v1.1/images/2'
- flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
+ 'imageRef': 3,
+ 'flavorRef': 3,
'adminPass': '',
},
}
@@ -1651,10 +1731,11 @@ class ServersTest(test.TestCase):
instances - 2 on one host and 3 on another.
'''
- def return_servers_with_host(context, user_id=1):
- return [stub_instance(i, 1, None, None, i % 2) for i in xrange(5)]
+ def return_servers_with_host(context, *args, **kwargs):
+ return [stub_instance(i, 'fake', 'fake', None, None, i % 2)
+ for i in xrange(5)]
- self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_project',
return_servers_with_host)
req = webob.Request.blank('/v1.0/servers/detail')
@@ -1674,7 +1755,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s['flavorId'], 1)
def test_server_pause(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -1686,7 +1767,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_server_unpause(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -1698,7 +1779,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_server_suspend(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -1710,7 +1791,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_server_resume(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -1722,7 +1803,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_server_reset_network(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -1734,7 +1815,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_server_inject_network_info(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -2025,7 +2106,7 @@ class ServersTest(test.TestCase):
self.assertEqual(self.server_delete_called, True)
def test_rescue_accepted(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = {}
self.called = False
@@ -2044,7 +2125,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_rescue_raises_handled(self):
- FLAGS.allow_admin_api = True
+ self.flags(allow_admin_api=True)
body = {}
def rescue_mock(*args, **kwargs):
@@ -2090,11 +2171,14 @@ class ServersTest(test.TestCase):
self.assertEqual(self.resize_called, True)
def test_resize_server_v11(self):
-
req = webob.Request.blank('/v1.1/servers/1/action')
req.content_type = 'application/json'
req.method = 'POST'
- body_dict = dict(resize=dict(flavorRef="http://localhost/3"))
+ body_dict = {
+ "resize": {
+ "flavorRef": 3,
+ },
+ }
req.body = json.dumps(body_dict)
self.resize_called = False
@@ -2108,8 +2192,8 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
self.assertEqual(self.resize_called, True)
- def test_resize_bad_flavor_fails(self):
- req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3)))
+ def test_resize_bad_flavor_data(self):
+ req = self.webreq('/1/action', 'POST', {"resize": "bad_data"})
self.resize_called = False
@@ -2119,14 +2203,54 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 422)
+ self.assertEqual(res.status_int, 400)
self.assertEqual(self.resize_called, False)
+ def test_resize_invalid_flavorid(self):
+ req = self.webreq('/1/action', 'POST', {"resize": {"flavorId": 300}})
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_resize_nonint_flavorid(self):
+ req = self.webreq('/1/action', 'POST', {"resize": {"flavorId": "a"}})
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_resize_invalid_flavorid_v1_1(self):
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.content_type = 'application/json'
+ req.method = 'POST'
+ resize_body = {
+ "resize": {
+ "image": {
+ "id": 300,
+ },
+ },
+ }
+ req.body = json.dumps(resize_body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_resize_nonint_flavorid_v1_1(self):
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.content_type = 'application/json'
+ req.method = 'POST'
+ resize_body = {
+ "resize": {
+ "image": {
+ "id": "a",
+ },
+ },
+ }
+ req.body = json.dumps(resize_body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_resize_raises_fails(self):
req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
def resize_mock(*args):
- raise Exception('hurr durr')
+ raise Exception("An error occurred.")
self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
@@ -2164,7 +2288,7 @@ class ServersTest(test.TestCase):
req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
def confirm_resize_mock(*args):
- raise Exception('hurr durr')
+ raise Exception("An error occurred.")
self.stubs.Set(nova.compute.api.API, 'confirm_resize',
confirm_resize_mock)
@@ -2191,7 +2315,7 @@ class ServersTest(test.TestCase):
req = self.webreq('/1/action', 'POST', dict(revertResize=None))
def revert_resize_mock(*args):
- raise Exception('hurr durr')
+ raise Exception("An error occurred.")
self.stubs.Set(nova.compute.api.API, 'revert_resize',
revert_resize_mock)
@@ -2203,8 +2327,9 @@ class ServersTest(test.TestCase):
"""This is basically the same as resize, only we provide the `migrate`
attribute in the body's dict.
"""
- req = self.webreq('/1/action', 'POST', dict(migrate=None))
+ req = self.webreq('/1/migrate', 'POST')
+ FLAGS.allow_admin_api = True
self.resize_called = False
def resize_mock(*args):
@@ -2216,6 +2341,14 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
self.assertEqual(self.resize_called, True)
+ def test_migrate_server_no_admin_api_fails(self):
+ req = self.webreq('/1/migrate', 'POST')
+
+ FLAGS.allow_admin_api = False
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
def test_shutdown_status(self):
new_server = return_server_with_power_state(power_state.SHUTDOWN)
self.stubs.Set(nova.db.api, 'instance_get', new_server)
@@ -2234,8 +2367,270 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+ def test_create_image_v1_1(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v1.1/images/123', location)
-class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
+ def test_create_image_v1_1_with_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {'key': 'asdf'},
+ },
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v1.1/images/123', location)
+
+ def test_create_image_v1_1_no_name(self):
+ body = {
+ 'createImage': {},
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_image_v1_1_bad_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'geoff',
+ 'metadata': 'henry',
+ },
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup(self):
+ """The happy path for creating backups"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ self.assertTrue(response.headers['Location'])
+
+ def test_create_backup_v1_1(self):
+ """The happy path for creating backups through v1.1 api"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ self.assertTrue(response.headers['Location'])
+
+ def test_create_backup_admin_api_off(self):
+ """The happy path for creating backups"""
+ FLAGS.allow_admin_api = False
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(501, response.status_int)
+
+ def test_create_backup_with_metadata(self):
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': {'123': 'asdf'},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ self.assertTrue(response.headers['Location'])
+
+ def test_create_backup_no_name(self):
+ """Name is required for backups"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_rotation(self):
+ """Rotation is required for backup requests"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ """Backup Type (daily or weekly) is required for backup requests"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'rotation': 1,
+ },
+ }
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_bad_entity(self):
+ FLAGS.allow_admin_api = True
+
+ body = {'createBackup': 'go'}
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+
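The createBackup tests above pin down the action's status codes: 202 plus a Location header on success, 400 for a malformed entity or a missing name, backup_type, or rotation, and 501 when the admin API is disabled. A minimal sketch of validation that would produce those responses (illustrative only, not the nova controller code):

    import webob.exc


    def validate_create_backup(body, admin_api_enabled=True):
        """Raise the webob exception matching the expectations tested above."""
        if not admin_api_enabled:
            raise webob.exc.HTTPNotImplemented()  # rendered as a 501 response
        entity = body.get('createBackup')
        if not isinstance(entity, dict):
            raise webob.exc.HTTPBadRequest()      # 400: malformed entity
        for required in ('name', 'backup_type', 'rotation'):
            if required not in entity:
                raise webob.exc.HTTPBadRequest()  # 400: missing field
        # On success the controller would start the backup and answer 202,
        # setting a Location header that points at the new image resource.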
+class TestServerActionXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerActionXMLDeserializer, self).setUp()
+ self.deserializer = create_instance_helper.ServerXMLDeserializer()
+
+ def tearDown(self):
+ super(TestServerActionXMLDeserializer, self).tearDown()
+
+ def test_create_image(self):
+ serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createImage": {
+ "name": "new-server-test",
+ "metadata": {},
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_create_image_with_metadata(self):
+ serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test">
+ <metadata>
+ <meta key="key1">value1</meta>
+ </metadata>
+</createImage>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createImage": {
+ "name": "new-server-test",
+ "metadata": {"key1": "value1"},
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_create_backup_with_metadata(self):
+ serial_request = """
+<createBackup xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ rotation="12"
+ backup_type="daily">
+ <metadata>
+ <meta key="key1">value1</meta>
+ </metadata>
+</createBackup>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createBackup": {
+ "name": "new-server-test",
+ "rotation": "12",
+ "backup_type": "daily",
+ "metadata": {"key1": "value1"},
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+
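The action deserializer tests above expect the element's XML attributes to become top-level keys and any <metadata>/<meta> children to become a metadata dict, defaulting to {} when absent. A self-contained sketch of that mapping with minidom (illustrative, not the ServerXMLDeserializer implementation):

    from xml.dom import minidom


    def action_to_dict(xml_text):
        """Map an action element to the dict shape the tests above expect."""
        root = minidom.parseString(xml_text).documentElement
        body = dict(root.attributes.items())
        body['metadata'] = {}
        for meta in root.getElementsByTagName('meta'):
            key = meta.getAttribute('key')
            body['metadata'][key] = ''.join(
                child.nodeValue for child in meta.childNodes)
        return {root.tagName: body}


    xml = ('<createBackup name="new-server-test" rotation="12" '
           'backup_type="daily"><metadata>'
           '<meta key="key1">value1</meta></metadata></createBackup>')
    expected = {'createBackup': {'name': 'new-server-test',
                                 'rotation': '12',
                                 'backup_type': 'daily',
                                 'metadata': {'key1': 'value1'}}}
    assert action_to_dict(xml) == expected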
+class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):
def setUp(self):
self.deserializer = create_instance_helper.ServerXMLDeserializer()
@@ -2249,6 +2644,8 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"name": "new-server-test",
"imageId": "1",
"flavorId": "1",
+ "metadata": {},
+ "personality": [],
}}
self.assertEquals(request['body'], expected)
@@ -2264,6 +2661,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"imageId": "1",
"flavorId": "1",
"metadata": {},
+ "personality": [],
}}
self.assertEquals(request['body'], expected)
@@ -2278,6 +2676,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
"name": "new-server-test",
"imageId": "1",
"flavorId": "1",
+ "metadata": {},
"personality": [],
}}
self.assertEquals(request['body'], expected)
@@ -2515,21 +2914,191 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
request = self.deserializer.deserialize(serial_request, 'create')
self.assertEqual(request['body'], expected)
- def test_request_xmlser_with_flavor_image_href(self):
+
+class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase):
+
+ def setUp(self):
+ self.deserializer = create_instance_helper.ServerXMLDeserializer()
+
+ def test_minimal_request(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2"/>"""
+ request = self.deserializer.deserialize(serial_request, 'create')
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "metadata": {},
+ "personality": [],
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_admin_pass(self):
serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v1.1"
- name="new-server-test"
- imageRef="http://localhost:8774/v1.1/images/1"
- flavorRef="http://localhost:8774/v1.1/flavors/1">
- </server>"""
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2"
+ adminPass="1234"/>"""
request = self.deserializer.deserialize(serial_request, 'create')
- self.assertEquals(request['body']["server"]["flavorRef"],
- "http://localhost:8774/v1.1/flavors/1")
- self.assertEquals(request['body']["server"]["imageRef"],
- "http://localhost:8774/v1.1/images/1")
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "adminPass": "1234",
+ "metadata": {},
+ "personality": [],
+ },
+ }
+ self.assertEquals(request['body'], expected)
+ def test_image_link(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="http://localhost:8774/v1.1/images/2"
+ flavorRef="3"/>"""
+ request = self.deserializer.deserialize(serial_request, 'create')
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "http://localhost:8774/v1.1/images/2",
+ "flavorRef": "3",
+ "metadata": {},
+ "personality": [],
+ },
+ }
+ self.assertEquals(request['body'], expected)
-class TextAddressesXMLSerialization(test.TestCase):
+ def test_flavor_link(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="http://localhost:8774/v1.1/flavors/3"/>"""
+ request = self.deserializer.deserialize(serial_request, 'create')
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "http://localhost:8774/v1.1/flavors/3",
+ "metadata": {},
+ "personality": [],
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_empty_metadata_personality(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <metadata/>
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request, 'create')
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "metadata": {},
+ "personality": [],
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_multiple_metadata_items(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <metadata>
+ <meta key="one">two</meta>
+ <meta key="open">snack</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request, 'create')
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "metadata": {"one": "two", "open": "snack"},
+ "personality": [],
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_multiple_personality_files(self):
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ imageRef="1"
+ flavorRef="2">
+ <personality>
+ <file path="/etc/banner.txt">MQ==</file>
+ <file path="/etc/hosts">Mg==</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request, 'create')
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "2",
+ "metadata": {},
+ "personality": [
+ {"path": "/etc/banner.txt", "contents": "MQ=="},
+ {"path": "/etc/hosts", "contents": "Mg=="},
+ ],
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_spec_request(self):
+ image_bookmark_link = "http://servers.api.openstack.org/1234/" + \
+ "images/52415800-8b69-11e0-9b19-734f6f006e54"
+ serial_request = """
+<server xmlns="http://docs.openstack.org/compute/api/v1.1"
+ imageRef="%s"
+ flavorRef="52415800-8b69-11e0-9b19-734f1195ff37"
+ name="new-server-test">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">Mg==</file>
+ </personality>
+</server>""" % (image_bookmark_link)
+ request = self.deserializer.deserialize(serial_request, 'create')
+ expected = {
+ "server": {
+ "name": "new-server-test",
+ "imageRef": "http://servers.api.openstack.org/1234/" + \
+ "images/52415800-8b69-11e0-9b19-734f6f006e54",
+ "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37",
+ "metadata": {"My Server Name": "Apache1"},
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "Mg==",
+ },
+ ],
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+
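The personality entries in these fixtures carry base64-encoded file contents; decoding the payloads used above shows the files are just the single characters "1" and "2":

    import base64

    # "MQ==" and "Mg==" are the base64 encodings of the one-byte files "1" and "2".
    assert base64.b64decode("MQ==") == b"1"
    assert base64.b64decode("Mg==") == b"2"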
+class TestAddressesXMLSerialization(test.TestCase):
serializer = nova.api.openstack.ips.IPXMLSerializer()
@@ -2589,18 +3158,8 @@ class TestServerInstanceCreation(test.TestCase):
def setUp(self):
super(TestServerInstanceCreation, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
- fakes.FakeAuthDatabase.data = {}
- fakes.stub_out_auth(self.stubs)
fakes.stub_out_image_service(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
- self.allow_admin = FLAGS.allow_admin_api
-
- def tearDown(self):
- self.stubs.UnsetAll()
- FLAGS.allow_admin_api = self.allow_admin
- super(TestServerInstanceCreation, self).tearDown()
def _setup_mock_compute_api_for_personality(self):
@@ -2899,10 +3458,12 @@ class ServersViewBuilderV11Test(test.TestCase):
pass
def _get_instance(self):
+ created_at = datetime.datetime(2010, 10, 10, 12, 0, 0)
+ updated_at = datetime.datetime(2010, 11, 11, 11, 0, 0)
instance = {
"id": 1,
- "created_at": "2010-10-10T12:00:00Z",
- "updated_at": "2010-11-11T11:00:00Z",
+ "created_at": created_at,
+ "updated_at": updated_at,
"admin_pass": "",
"user_id": "",
"project_id": "",
@@ -2950,7 +3511,7 @@ class ServersViewBuilderV11Test(test.TestCase):
address_builder,
flavor_builder,
image_builder,
- base_url
+ base_url,
)
return view_builder
@@ -3106,12 +3667,12 @@ class ServersViewBuilderV11Test(test.TestCase):
},
"flavor": {
"id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": flavor_bookmark,
- },
- ],
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
},
"addresses": {},
"metadata": {
@@ -3133,3 +3694,505 @@ class ServersViewBuilderV11Test(test.TestCase):
output = self.view_builder.build(self.instance, True)
self.assertDictMatch(output, expected_server)
+
+
+class ServerXMLSerializationTest(test.TestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_HREF = 'http://localhost/v1.1/servers/123'
+ SERVER_BOOKMARK = 'http://localhost/servers/123'
+ IMAGE_BOOKMARK = 'http://localhost/images/5'
+ FLAVOR_BOOKMARK = 'http://localhost/flavors/1'
+
+ def setUp(self):
+ self.maxDiff = None
+ super(ServerXMLSerializationTest, self).setUp()
+
+ def test_show(self):
+ serializer = servers.ServerXMLSerializer()
+
+ fixture = {
+ "server": {
+ "id": 1,
+ "uuid": FAKE_UUID,
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.IMAGE_BOOKMARK,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.FLAVOR_BOOKMARK,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "1",
+ },
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ }
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_image_bookmark = self.IMAGE_BOOKMARK
+ expected_flavor_bookmark = self.FLAVOR_BOOKMARK
+ expected_now = self.TIMESTAMP
+ expected_uuid = FAKE_UUID
+ expected = minidom.parseString("""
+ <server id="1"
+ uuid="%(expected_uuid)s"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ name="test_server"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ hostId="e4d909c290d0fb1ca068ffaddf22cbd0"
+ status="BUILD"
+ progress="0">
+ <atom:link href="%(expected_server_href)s" rel="self"/>
+ <atom:link href="%(expected_server_bookmark)s" rel="bookmark"/>
+ <image id="5">
+ <atom:link rel="bookmark" href="%(expected_image_bookmark)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link rel="bookmark" href="%(expected_flavor_bookmark)s"/>
+ </flavor>
+ <metadata>
+ <meta key="Open">
+ Stack
+ </meta>
+ <meta key="Number">
+ 1
+ </meta>
+ </metadata>
+ <addresses>
+ <network id="network_one">
+ <ip version="4" addr="67.23.10.138"/>
+ <ip version="6" addr="::babe:67.23.10.138"/>
+ </network>
+ <network id="network_two">
+ <ip version="4" addr="67.23.10.139"/>
+ <ip version="6" addr="::babe:67.23.10.139"/>
+ </network>
+ </addresses>
+ </server>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_create(self):
+ serializer = servers.ServerXMLSerializer()
+
+ fixture = {
+ "server": {
+ "id": 1,
+ "uuid": FAKE_UUID,
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
+ "adminPass": "test_password",
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.IMAGE_BOOKMARK,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.FLAVOR_BOOKMARK,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "1",
+ },
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ }
+ }
+
+ output = serializer.serialize(fixture, 'create')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_server_bookmark = self.SERVER_BOOKMARK
+ expected_image_bookmark = self.IMAGE_BOOKMARK
+ expected_flavor_bookmark = self.FLAVOR_BOOKMARK
+ expected_now = self.TIMESTAMP
+ expected_uuid = FAKE_UUID
+ expected = minidom.parseString("""
+ <server id="1"
+ uuid="%(expected_uuid)s"
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ name="test_server"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ hostId="e4d909c290d0fb1ca068ffaddf22cbd0"
+ status="BUILD"
+ adminPass="test_password"
+ progress="0">
+ <atom:link href="%(expected_server_href)s" rel="self"/>
+ <atom:link href="%(expected_server_bookmark)s" rel="bookmark"/>
+ <image id="5">
+ <atom:link rel="bookmark" href="%(expected_image_bookmark)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link rel="bookmark" href="%(expected_flavor_bookmark)s"/>
+ </flavor>
+ <metadata>
+ <meta key="Open">
+ Stack
+ </meta>
+ <meta key="Number">
+ 1
+ </meta>
+ </metadata>
+ <addresses>
+ <network id="network_one">
+ <ip version="4" addr="67.23.10.138"/>
+ <ip version="6" addr="::babe:67.23.10.138"/>
+ </network>
+ <network id="network_two">
+ <ip version="4" addr="67.23.10.139"/>
+ <ip version="6" addr="::babe:67.23.10.139"/>
+ </network>
+ </addresses>
+ </server>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index(self):
+ serializer = servers.ServerXMLSerializer()
+
+ expected_server_href = 'http://localhost/v1.1/servers/1'
+ expected_server_bookmark = 'http://localhost/servers/1'
+ expected_server_href_2 = 'http://localhost/v1.1/servers/2'
+ expected_server_bookmark_2 = 'http://localhost/servers/2'
+ fixture = {"servers": [
+ {
+ "id": 1,
+ "name": "test_server",
+ 'links': [
+ {
+ 'href': expected_server_href,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ "id": 2,
+ "name": "test_server_2",
+ 'links': [
+ {
+ 'href': expected_server_href_2,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark_2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]}
+
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <servers xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+ <server id="1" name="test_server">
+ <atom:link href="%(expected_server_href)s" rel="self"/>
+ <atom:link href="%(expected_server_bookmark)s" rel="bookmark"/>
+ </server>
+ <server id="2" name="test_server_2">
+ <atom:link href="%(expected_server_href_2)s" rel="self"/>
+ <atom:link href="%(expected_server_bookmark_2)s" rel="bookmark"/>
+ </server>
+ </servers>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_detail(self):
+ serializer = servers.ServerXMLSerializer()
+
+ expected_server_href = 'http://localhost/v1.1/servers/1'
+ expected_server_bookmark = 'http://localhost/servers/1'
+ expected_image_bookmark = self.IMAGE_BOOKMARK
+ expected_flavor_bookmark = self.FLAVOR_BOOKMARK
+ expected_now = self.TIMESTAMP
+ expected_uuid = FAKE_UUID
+
+ expected_server_href_2 = 'http://localhost/v1.1/servers/2'
+ expected_server_bookmark_2 = 'http://localhost/servers/2'
+ fixture = {"servers": [
+ {
+ "id": 1,
+ "uuid": FAKE_UUID,
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ },
+ "metadata": {
+ "Number": "1",
+ },
+ "links": [
+ {
+ "href": expected_server_href,
+ "rel": "self",
+ },
+ {
+ "href": expected_server_bookmark,
+ "rel": "bookmark",
+ },
+ ],
+ },
+ {
+ "id": 2,
+ "uuid": FAKE_UUID,
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 100,
+ "name": "test_server_2",
+ "status": "ACTIVE",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ },
+ "metadata": {
+ "Number": "2",
+ },
+ "links": [
+ {
+ "href": expected_server_href_2,
+ "rel": "self",
+ },
+ {
+ "href": expected_server_bookmark_2,
+ "rel": "bookmark",
+ },
+ ],
+ },
+ ]}
+
+ output = serializer.serialize(fixture, 'detail')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <servers xmlns="http://docs.openstack.org/compute/api/v1.1"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+ <server id="1"
+ uuid="%(expected_uuid)s"
+ name="test_server"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ hostId="e4d909c290d0fb1ca068ffaddf22cbd0"
+ status="BUILD"
+ progress="0">
+ <atom:link href="%(expected_server_href)s" rel="self"/>
+ <atom:link href="%(expected_server_bookmark)s" rel="bookmark"/>
+ <image id="5">
+ <atom:link rel="bookmark" href="%(expected_image_bookmark)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link rel="bookmark" href="%(expected_flavor_bookmark)s"/>
+ </flavor>
+ <metadata>
+ <meta key="Number">
+ 1
+ </meta>
+ </metadata>
+ <addresses>
+ <network id="network_one">
+ <ip version="4" addr="67.23.10.138"/>
+ <ip version="6" addr="::babe:67.23.10.138"/>
+ </network>
+ </addresses>
+ </server>
+ <server id="2"
+ uuid="%(expected_uuid)s"
+ name="test_server_2"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ hostId="e4d909c290d0fb1ca068ffaddf22cbd0"
+ status="ACTIVE"
+ progress="100">
+ <atom:link href="%(expected_server_href_2)s" rel="self"/>
+ <atom:link href="%(expected_server_bookmark_2)s" rel="bookmark"/>
+ <image id="5">
+ <atom:link rel="bookmark" href="%(expected_image_bookmark)s"/>
+ </image>
+ <flavor id="1">
+ <atom:link rel="bookmark" href="%(expected_flavor_bookmark)s"/>
+ </flavor>
+ <metadata>
+ <meta key="Number">
+ 2
+ </meta>
+ </metadata>
+ <addresses>
+ <network id="network_one">
+ <ip version="4" addr="67.23.10.138"/>
+ <ip version="6" addr="::babe:67.23.10.138"/>
+ </network>
+ </addresses>
+ </server>
+ </servers>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
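The expected XML documents in these serializer tests are plain Python templates filled in with mapping-style string interpolation against locals(), which keeps the long href values out of the markup:

    expected_server_href = 'http://localhost/v1.1/servers/1'
    template = '<atom:link href="%(expected_server_href)s" rel="self"/>'
    assert (template % locals() ==
            '<atom:link href="http://localhost/v1.1/servers/1" rel="self"/>')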
diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index c2bd7e45a..36fa1de0f 100644
--- a/nova/tests/api/openstack/test_shared_ip_groups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -15,26 +15,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import stubout
import webob
from nova import test
-from nova.api.openstack import shared_ip_groups
from nova.tests.api.openstack import fakes
class SharedIpGroupsTest(test.TestCase):
- def setUp(self):
- super(SharedIpGroupsTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
- fakes.stub_out_auth(self.stubs)
-
- def tearDown(self):
- self.stubs.UnsetAll()
- super(SharedIpGroupsTest, self).tearDown()
-
def test_get_shared_ip_groups(self):
req = webob.Request.blank('/v1.0/shared_ip_groups')
res = req.get_response(fakes.wsgi_app())
diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py
index effb2f592..705c02f6b 100644
--- a/nova/tests/api/openstack/test_users.py
+++ b/nova/tests/api/openstack/test_users.py
@@ -15,7 +15,6 @@
import json
-import stubout
import webob
from nova import flags
@@ -41,7 +40,7 @@ def fake_admin_check(self, req):
class UsersTest(test.TestCase):
def setUp(self):
super(UsersTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
+ self.flags(allow_admin_api=True)
self.stubs.Set(users.Controller, '__init__',
fake_init)
self.stubs.Set(users.Controller, '_check_admin',
@@ -58,16 +57,10 @@ class UsersTest(test.TestCase):
fakes.stub_out_auth(self.stubs)
self.allow_admin = FLAGS.allow_admin_api
- FLAGS.allow_admin_api = True
fakemgr = fakes.FakeAuthManager()
fakemgr.add_user(User('id1', 'guy1', 'acc1', 'secret1', False))
fakemgr.add_user(User('id2', 'guy2', 'acc2', 'secret2', True))
- def tearDown(self):
- self.stubs.UnsetAll()
- FLAGS.allow_admin_api = self.allow_admin
- super(UsersTest, self).tearDown()
-
def test_get_user_list(self):
req = webob.Request.blank('/v1.0/users')
res = req.get_response(fakes.wsgi_app())
diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py
index da964ee1f..e68455778 100644
--- a/nova/tests/api/openstack/test_versions.py
+++ b/nova/tests/api/openstack/test_versions.py
@@ -16,21 +16,92 @@
# under the License.
import json
+import stubout
import webob
+import xml.etree.ElementTree
+
from nova import context
from nova import test
from nova.tests.api.openstack import fakes
from nova.api.openstack import versions
from nova.api.openstack import views
+from nova.api.openstack import wsgi
+
+VERSIONS = {
+ "v1.0": {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/application.wadl"
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.0+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.0+json"
+ }
+ ],
+ },
+ "v1.1": {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/application.wadl"
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.1+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.1+json"
+ }
+ ],
+ },
+}
class VersionsTest(test.TestCase):
def setUp(self):
super(VersionsTest, self).setUp()
self.context = context.get_admin_context()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.stub_out_auth(self.stubs)
+ # Stub out VERSIONS
+ self.old_versions = versions.VERSIONS
+ versions.VERSIONS = VERSIONS
def tearDown(self):
+ versions.VERSIONS = self.old_versions
super(VersionsTest, self).tearDown()
def test_get_version_list(self):
@@ -44,7 +115,7 @@ class VersionsTest(test.TestCase):
{
"id": "v1.1",
"status": "CURRENT",
- "updated": "2011-07-18T11:30:00Z",
+ "updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
@@ -54,7 +125,7 @@ class VersionsTest(test.TestCase):
{
"id": "v1.0",
"status": "DEPRECATED",
- "updated": "2010-10-09T11:30:00Z",
+ "updated": "2011-01-21T11:33:21Z",
"links": [
{
"rel": "self",
@@ -64,6 +135,183 @@ class VersionsTest(test.TestCase):
]
self.assertEqual(versions, expected)
+ def test_get_version_1_0_detail(self):
+ req = webob.Request.blank('/v1.0/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = json.loads(res.body)
+ expected = {
+ "version": {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.0/"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/application.wadl"
+ }
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/"
+ "vnd.openstack.compute-v1.0+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/"
+ "vnd.openstack.compute-v1.0+json"
+ }
+ ]
+ }
+ }
+ self.assertEqual(expected, version)
+
+ def test_get_version_1_1_detail(self):
+ req = webob.Request.blank('/v1.1/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = json.loads(res.body)
+ expected = {
+ "version": {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/application.wadl"
+ }
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/"
+ "vnd.openstack.compute-v1.1+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/"
+ "vnd.openstack.compute-v1.1+json"
+ }
+ ]
+ }
+ }
+ self.assertEqual(expected, version)
+
+ def test_get_version_1_0_detail_xml(self):
+ req = webob.Request.blank('/v1.0/')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/xml")
+ root = xml.etree.ElementTree.XML(res.body)
+ self.assertEqual(root.tag.split('}')[1], "version")
+ self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11)
+
+ children = list(root)
+ media_types = children[0]
+ media_type_nodes = list(media_types)
+ links = (children[1], children[2], children[3])
+
+ self.assertEqual(media_types.tag.split('}')[1], 'media-types')
+ for media_node in media_type_nodes:
+ self.assertEqual(media_node.tag.split('}')[1], 'media-type')
+
+ expected = """
+ <version id="v1.0" status="DEPRECATED"
+ updated="2011-01-21T11:33:21Z"
+ xmlns="%s"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+
+ <media-types>
+ <media-type base="application/xml"
+ type="application/vnd.openstack.compute-v1.0+xml"/>
+ <media-type base="application/json"
+ type="application/vnd.openstack.compute-v1.0+json"/>
+ </media-types>
+
+ <atom:link href="http://localhost/v1.0/"
+ rel="self"/>
+
+ <atom:link href="http://docs.rackspacecloud.com/servers/
+ api/v1.0/cs-devguide-20110125.pdf"
+ rel="describedby"
+ type="application/pdf"/>
+
+ <atom:link href="http://docs.rackspacecloud.com/servers/
+ api/v1.0/application.wadl"
+ rel="describedby"
+ type="application/vnd.sun.wadl+xml"/>
+ </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11
+
+ actual = res.body.replace(" ", "").replace("\n", "")
+ self.assertEqual(expected, actual)
+
+ def test_get_version_1_1_detail_xml(self):
+ req = webob.Request.blank('/v1.1/')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/xml")
+ expected = """
+ <version id="v1.1" status="CURRENT"
+ updated="2011-01-21T11:33:21Z"
+ xmlns="%s"
+ xmlns:atom="http://www.w3.org/2005/Atom">
+
+ <media-types>
+ <media-type base="application/xml"
+ type="application/vnd.openstack.compute-v1.1+xml"/>
+ <media-type base="application/json"
+ type="application/vnd.openstack.compute-v1.1+json"/>
+ </media-types>
+
+ <atom:link href="http://localhost/v1.1/"
+ rel="self"/>
+
+ <atom:link href="http://docs.rackspacecloud.com/servers/
+ api/v1.1/cs-devguide-20110125.pdf"
+ rel="describedby"
+ type="application/pdf"/>
+
+ <atom:link href="http://docs.rackspacecloud.com/servers/
+ api/v1.1/application.wadl"
+ rel="describedby"
+ type="application/vnd.sun.wadl+xml"/>
+ </version>""".replace(" ", "").replace("\n", "") % wsgi.XMLNS_V11
+
+ actual = res.body.replace(" ", "").replace("\n", "")
+ self.assertEqual(expected, actual)
+
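The XML detail tests above compare serialized documents by stripping every space and newline from both the expected literal and the response body, which lets the expected strings be written as freely indented multi-line literals. A standalone sketch of that comparison idiom (illustrative only, not part of the patch; normalize() is a hypothetical helper name):

    def normalize(xml_text):
        """Collapse whitespace the way the tests above do before comparing."""
        return xml_text.replace(" ", "").replace("\n", "")

    expected = """
        <version id="v1.0" status="DEPRECATED"/>
    """
    actual = '<version id="v1.0"\n    status="DEPRECATED"/>'
    assert normalize(expected) == normalize(actual)

The trade-off is that whitespace inside attribute values is discarded as well, so the fixtures above avoid relying on significant spaces in the documents they compare.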
def test_get_version_list_xml(self):
req = webob.Request.blank('/')
req.accept = "application/xml"
@@ -71,18 +319,94 @@ class VersionsTest(test.TestCase):
self.assertEqual(res.status_int, 200)
self.assertEqual(res.content_type, "application/xml")
- expected = """<versions>
- <version id="v1.1" status="CURRENT" updated="2011-07-18T11:30:00Z">
+ expected = """
+ <versions xmlns="%s" xmlns:atom="%s">
+ <version id="v1.1" status="CURRENT" updated="2011-01-21T11:33:21Z">
<atom:link href="http://localhost/v1.1/" rel="self"/>
</version>
<version id="v1.0" status="DEPRECATED"
- updated="2010-10-09T11:30:00Z">
+ updated="2011-01-21T11:33:21Z">
<atom:link href="http://localhost/v1.0/" rel="self"/>
</version>
- </versions>""".replace(" ", "").replace("\n", "")
+ </versions>""".replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11,
+ wsgi.XMLNS_ATOM)
+
+ actual = res.body.replace(" ", "").replace("\n", "")
+
+ self.assertEqual(expected, actual)
+
+ def test_get_version_1_0_detail_atom(self):
+ req = webob.Request.blank('/v1.0/')
+ req.accept = "application/atom+xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual("application/atom+xml", res.content_type)
+ expected = """
+ <feed xmlns="http://www.w3.org/2005/Atom">
+ <title type="text">About This Version</title>
+ <updated>2011-01-21T11:33:21Z</updated>
+ <id>http://localhost/v1.0/</id>
+ <author>
+ <name>Rackspace</name>
+ <uri>http://www.rackspace.com/</uri>
+ </author>
+ <link href="http://localhost/v1.0/" rel="self"/>
+ <entry>
+ <id>http://localhost/v1.0/</id>
+ <title type="text">Version v1.0</title>
+ <updated>2011-01-21T11:33:21Z</updated>
+ <link href="http://localhost/v1.0/"
+ rel="self"/>
+ <link href="http://docs.rackspacecloud.com/servers/
+ api/v1.0/cs-devguide-20110125.pdf"
+ rel="describedby" type="application/pdf"/>
+ <link href="http://docs.rackspacecloud.com/servers/
+ api/v1.0/application.wadl"
+ rel="describedby" type="application/vnd.sun.wadl+xml"/>
+ <content type="text">
+ Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)
+ </content>
+ </entry>
+ </feed>""".replace(" ", "").replace("\n", "")
actual = res.body.replace(" ", "").replace("\n", "")
+ self.assertEqual(expected, actual)
+
+ def test_get_version_1_1_detail_atom(self):
+ req = webob.Request.blank('/v1.1/')
+ req.accept = "application/atom+xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual("application/atom+xml", res.content_type)
+ expected = """
+ <feed xmlns="http://www.w3.org/2005/Atom">
+ <title type="text">About This Version</title>
+ <updated>2011-01-21T11:33:21Z</updated>
+ <id>http://localhost/v1.1/</id>
+ <author>
+ <name>Rackspace</name>
+ <uri>http://www.rackspace.com/</uri>
+ </author>
+ <link href="http://localhost/v1.1/" rel="self"/>
+ <entry>
+ <id>http://localhost/v1.1/</id>
+ <title type="text">Version v1.1</title>
+ <updated>2011-01-21T11:33:21Z</updated>
+ <link href="http://localhost/v1.1/"
+ rel="self"/>
+ <link href="http://docs.rackspacecloud.com/servers/
+ api/v1.1/cs-devguide-20110125.pdf"
+ rel="describedby" type="application/pdf"/>
+ <link href="http://docs.rackspacecloud.com/servers/
+ api/v1.1/application.wadl"
+ rel="describedby" type="application/vnd.sun.wadl+xml"/>
+ <content type="text">
+ Version v1.1 CURRENT (2011-01-21T11:33:21Z)
+ </content>
+ </entry>
+ </feed>""".replace(" ", "").replace("\n", "")
+ actual = res.body.replace(" ", "").replace("\n", "")
self.assertEqual(expected, actual)
def test_get_version_list_atom(self):
@@ -95,7 +419,7 @@ class VersionsTest(test.TestCase):
expected = """
<feed xmlns="http://www.w3.org/2005/Atom">
<title type="text">Available API Versions</title>
- <updated>2011-07-18T11:30:00Z</updated>
+ <updated>2011-01-21T11:33:21Z</updated>
<id>http://localhost/</id>
<author>
<name>Rackspace</name>
@@ -105,19 +429,19 @@ class VersionsTest(test.TestCase):
<entry>
<id>http://localhost/v1.1/</id>
<title type="text">Version v1.1</title>
- <updated>2011-07-18T11:30:00Z</updated>
+ <updated>2011-01-21T11:33:21Z</updated>
<link href="http://localhost/v1.1/" rel="self"/>
<content type="text">
- Version v1.1 CURRENT (2011-07-18T11:30:00Z)
+ Version v1.1 CURRENT (2011-01-21T11:33:21Z)
</content>
</entry>
<entry>
<id>http://localhost/v1.0/</id>
<title type="text">Version v1.0</title>
- <updated>2010-10-09T11:30:00Z</updated>
+ <updated>2011-01-21T11:33:21Z</updated>
<link href="http://localhost/v1.0/" rel="self"/>
<content type="text">
- Version v1.0 DEPRECATED (2010-10-09T11:30:00Z)
+ Version v1.0 DEPRECATED (2011-01-21T11:33:21Z)
</content>
</entry>
</feed>
@@ -127,28 +451,184 @@ class VersionsTest(test.TestCase):
self.assertEqual(expected, actual)
+ def test_multi_choice_image(self):
+ req = webob.Request.blank('/images/1')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/json")
+
+ expected = {
+ "choices": [
+ {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "links": [
+ {
+ "href": "http://localhost/v1.1/images/1",
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.1+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.1+json"
+ },
+ ],
+ },
+ {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ "links": [
+ {
+ "href": "http://localhost/v1.0/images/1",
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.0+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.0+json"
+ },
+ ],
+ },
+ ], }
+
+ self.assertDictMatch(expected, json.loads(res.body))
+
+ def test_multi_choice_image_xml(self):
+ req = webob.Request.blank('/images/1')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/xml")
+
+ expected = """
+ <choices xmlns="%s" xmlns:atom="%s">
+ <version id="v1.1" status="CURRENT">
+ <media-types>
+ <media-type base="application/xml"
+ type="application/vnd.openstack.compute-v1.1+xml"/>
+ <media-type base="application/json"
+ type="application/vnd.openstack.compute-v1.1+json"/>
+ </media-types>
+ <atom:link href="http://localhost/v1.1/images/1" rel="self"/>
+ </version>
+ <version id="v1.0" status="DEPRECATED">
+ <media-types>
+ <media-type base="application/xml"
+ type="application/vnd.openstack.compute-v1.0+xml"/>
+ <media-type base="application/json"
+ type="application/vnd.openstack.compute-v1.0+json"/>
+ </media-types>
+ <atom:link href="http://localhost/v1.0/images/1" rel="self"/>
+ </version>
+ </choices>""".replace(" ", "").replace("\n", "") % (wsgi.XMLNS_V11,
+ wsgi.XMLNS_ATOM)
+
+ # compare the whitespace-normalized body against the expected document
+ actual = res.body.replace(" ", "").replace("\n", "")
+ self.assertEqual(expected, actual)
+
+ def test_multi_choice_server_atom(self):
+ """
+ Make sure multi choice responses do not have content-type
+ application/atom+xml (should use default of json)
+ """
+ req = webob.Request.blank('/servers/2')
+ req.accept = "application/atom+xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/json")
+
+ def test_multi_choice_server(self):
+ req = webob.Request.blank('/servers/2')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/json")
+
+ expected = {
+ "choices": [
+ {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "links": [
+ {
+ "href": "http://localhost/v1.1/servers/2",
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.1+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.1+json"
+ },
+ ],
+ },
+ {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ "links": [
+ {
+ "href": "http://localhost/v1.0/servers/2",
+ "rel": "self",
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.0+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.0+json"
+ },
+ ],
+ },
+ ], }
+
+ self.assertDictMatch(expected, json.loads(res.body))
+
+
+class VersionsViewBuilderTests(test.TestCase):
def test_view_builder(self):
base_url = "http://example.org/"
version_data = {
- "id": "3.2.1",
- "status": "CURRENT",
- "updated": "2011-07-18T11:30:00Z"}
+ "v3.2.1": {
+ "id": "3.2.1",
+ "status": "CURRENT",
+ "updated": "2011-07-18T11:30:00Z",
+ }
+ }
expected = {
- "id": "3.2.1",
- "status": "CURRENT",
- "updated": "2011-07-18T11:30:00Z",
- "links": [
+ "versions": [
{
- "rel": "self",
- "href": "http://example.org/3.2.1/",
- },
- ],
+ "id": "3.2.1",
+ "status": "CURRENT",
+ "updated": "2011-07-18T11:30:00Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://example.org/3.2.1/",
+ },
+ ],
+ }
+ ]
}
builder = views.versions.ViewBuilder(base_url)
- output = builder.build(version_data)
+ output = builder.build_versions(version_data)
self.assertEqual(output, expected)
@@ -163,7 +643,9 @@ class VersionsTest(test.TestCase):
self.assertEqual(actual, expected)
- def test_xml_serializer(self):
+
+class VersionsSerializerTests(test.TestCase):
+ def test_versions_list_xml_serializer(self):
versions_data = {
'versions': [
{
@@ -180,20 +662,137 @@ class VersionsTest(test.TestCase):
]
}
- expected = """
- <versions>
- <version id="2.7.1" status="DEPRECATED"
- updated="2011-07-18T11:30:00Z">
- <atom:link href="http://test/2.7.1" rel="self"/>
- </version>
- </versions>""".replace(" ", "").replace("\n", "")
+ serializer = versions.VersionsXMLSerializer()
+ response = serializer.index(versions_data)
+
+ root = xml.etree.ElementTree.XML(response)
+ self.assertEqual(root.tag.split('}')[1], "versions")
+ self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11)
+ version = list(root)[0]
+ self.assertEqual(version.tag.split('}')[1], "version")
+ self.assertEqual(version.get('id'),
+ versions_data['versions'][0]['id'])
+ self.assertEqual(version.get('status'),
+ versions_data['versions'][0]['status'])
+
+ link = list(version)[0]
+
+ self.assertEqual(link.tag.split('}')[1], "link")
+ self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM)
+ for key, val in versions_data['versions'][0]['links'][0].items():
+ self.assertEqual(link.get(key), val)
+
+ def test_versions_multi_xml_serializer(self):
+ versions_data = {
+ 'choices': [
+ {
+ "id": "2.7.1",
+ "updated": "2011-07-18T11:30:00Z",
+ "status": "DEPRECATED",
+ "media-types": VERSIONS['v1.1']['media-types'],
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://test/2.7.1/images",
+ },
+ ],
+ },
+ ]
+ }
serializer = versions.VersionsXMLSerializer()
- response = serializer.default(versions_data)
- response = response.replace(" ", "").replace("\n", "")
- self.assertEqual(expected, response)
+ response = serializer.multi(versions_data)
+
+ root = xml.etree.ElementTree.XML(response)
+ self.assertEqual(root.tag.split('}')[1], "choices")
+ self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11)
+ version = list(root)[0]
+ self.assertEqual(version.tag.split('}')[1], "version")
+ self.assertEqual(version.get('id'), versions_data['choices'][0]['id'])
+ self.assertEqual(version.get('status'),
+ versions_data['choices'][0]['status'])
+
+ media_types = list(version)[0]
+ media_type_nodes = list(media_types)
+ self.assertEqual(media_types.tag.split('}')[1], "media-types")
+
+ set_types = versions_data['choices'][0]['media-types']
+ for i, type in enumerate(set_types):
+ node = media_type_nodes[i]
+ self.assertEqual(node.tag.split('}')[1], "media-type")
+ for key, val in set_types[i].items():
+ self.assertEqual(node.get(key), val)
+
+ link = list(version)[1]
+
+ self.assertEqual(link.tag.split('}')[1], "link")
+ self.assertEqual(link.tag.split('}')[0].strip('{'), wsgi.XMLNS_ATOM)
+ for key, val in versions_data['choices'][0]['links'][0].items():
+ self.assertEqual(link.get(key), val)
+
+ def test_version_detail_xml_serializer(self):
+ version_data = {
+ "version": {
+ "id": "v1.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.0/"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.0/application.wadl"
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.0+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.0+json"
+ }
+ ],
+ },
+ }
+
+ serializer = versions.VersionsXMLSerializer()
+ response = serializer.show(version_data)
+
+ root = xml.etree.ElementTree.XML(response)
+ self.assertEqual(root.tag.split('}')[1], "version")
+ self.assertEqual(root.tag.split('}')[0].strip('{'), wsgi.XMLNS_V11)
- def test_atom_serializer(self):
+ children = list(root)
+ media_types = children[0]
+ media_type_nodes = list(media_types)
+ links = (children[1], children[2], children[3])
+
+ self.assertEqual(media_types.tag.split('}')[1], 'media-types')
+ for i, media_node in enumerate(media_type_nodes):
+ self.assertEqual(media_node.tag.split('}')[1], 'media-type')
+ for key, val in version_data['version']['media-types'][i].items():
+ self.assertEqual(val, media_node.get(key))
+
+ for i, link in enumerate(links):
+ self.assertEqual(link.tag.split('}')[0].strip('{'),
+ 'http://www.w3.org/2005/Atom')
+ self.assertEqual(link.tag.split('}')[1], 'link')
+ for key, val in version_data['version']['links'][i].items():
+ self.assertEqual(val, link.get(key))
+
+ def test_versions_list_atom_serializer(self):
versions_data = {
'versions': [
{
@@ -210,45 +809,158 @@ class VersionsTest(test.TestCase):
]
}
- expected = """
- <feed xmlns="http://www.w3.org/2005/Atom">
- <title type="text">
- Available API Versions
- </title>
- <updated>
- 2011-07-20T11:40:00Z
- </updated>
- <id>
- http://test/
- </id>
- <author>
- <name>
- Rackspace
- </name>
- <uri>
- http://www.rackspace.com/
- </uri>
- </author>
- <link href="http://test/" rel="self"/>
- <entry>
- <id>
- http://test/2.9.8
- </id>
- <title type="text">
- Version 2.9.8
- </title>
- <updated>
- 2011-07-20T11:40:00Z
- </updated>
- <link href="http://test/2.9.8" rel="self"/>
- <content type="text">
- Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)
- </content>
- </entry>
- </feed>""".replace(" ", "").replace("\n", "")
+ serializer = versions.VersionsAtomSerializer()
+ response = serializer.index(versions_data)
+
+ root = xml.etree.ElementTree.XML(response)
+ self.assertEqual(root.tag.split('}')[1], "feed")
+ self.assertEqual(root.tag.split('}')[0].strip('{'),
+ "http://www.w3.org/2005/Atom")
+
+ children = list(root)
+ title = children[0]
+ updated = children[1]
+ id = children[2]
+ author = children[3]
+ link = children[4]
+ entry = children[5]
+
+ self.assertEqual(title.tag.split('}')[1], 'title')
+ self.assertEqual(title.text, 'Available API Versions')
+ self.assertEqual(updated.tag.split('}')[1], 'updated')
+ self.assertEqual(updated.text, '2011-07-20T11:40:00Z')
+ self.assertEqual(id.tag.split('}')[1], 'id')
+ self.assertEqual(id.text, 'http://test/')
+
+ self.assertEqual(author.tag.split('}')[1], 'author')
+ author_name = list(author)[0]
+ author_uri = list(author)[1]
+ self.assertEqual(author_name.tag.split('}')[1], 'name')
+ self.assertEqual(author_name.text, 'Rackspace')
+ self.assertEqual(author_uri.tag.split('}')[1], 'uri')
+ self.assertEqual(author_uri.text, 'http://www.rackspace.com/')
+
+ self.assertEqual(link.get('href'), 'http://test/')
+ self.assertEqual(link.get('rel'), 'self')
+
+ self.assertEqual(entry.tag.split('}')[1], 'entry')
+ entry_children = list(entry)
+ entry_id = entry_children[0]
+ entry_title = entry_children[1]
+ entry_updated = entry_children[2]
+ entry_link = entry_children[3]
+ entry_content = entry_children[4]
+ self.assertEqual(entry_id.tag.split('}')[1], "id")
+ self.assertEqual(entry_id.text, "http://test/2.9.8")
+ self.assertEqual(entry_title.tag.split('}')[1], "title")
+ self.assertEqual(entry_title.get('type'), "text")
+ self.assertEqual(entry_title.text, "Version 2.9.8")
+ self.assertEqual(entry_updated.tag.split('}')[1], "updated")
+ self.assertEqual(entry_updated.text, "2011-07-20T11:40:00Z")
+ self.assertEqual(entry_link.tag.split('}')[1], "link")
+ self.assertEqual(entry_link.get('href'), "http://test/2.9.8")
+ self.assertEqual(entry_link.get('rel'), "self")
+ self.assertEqual(entry_content.tag.split('}')[1], "content")
+ self.assertEqual(entry_content.get('type'), "text")
+ self.assertEqual(entry_content.text,
+ "Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)")
+
+ def test_version_detail_atom_serializer(self):
+ versions_data = {
+ "version": {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/cs-devguide-20110125.pdf"
+ },
+ {
+ "rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://docs.rackspacecloud.com/"
+ "servers/api/v1.1/application.wadl"
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute-v1.1+xml"
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute-v1.1+json"
+ }
+ ],
+ },
+ }
serializer = versions.VersionsAtomSerializer()
- response = serializer.default(versions_data)
- print response
- response = response.replace(" ", "").replace("\n", "")
- self.assertEqual(expected, response)
+ response = serializer.show(versions_data)
+
+ root = xml.etree.ElementTree.XML(response)
+ self.assertEqual(root.tag.split('}')[1], "feed")
+ self.assertEqual(root.tag.split('}')[0].strip('{'),
+ "http://www.w3.org/2005/Atom")
+
+ children = list(root)
+ title = children[0]
+ updated = children[1]
+ id = children[2]
+ author = children[3]
+ link = children[4]
+ entry = children[5]
+
+ self.assertEqual(root.tag.split('}')[1], 'feed')
+ self.assertEqual(title.tag.split('}')[1], 'title')
+ self.assertEqual(title.text, 'About This Version')
+ self.assertEqual(updated.tag.split('}')[1], 'updated')
+ self.assertEqual(updated.text, '2011-01-21T11:33:21Z')
+ self.assertEqual(id.tag.split('}')[1], 'id')
+ self.assertEqual(id.text, 'http://localhost/v1.1/')
+
+ self.assertEqual(author.tag.split('}')[1], 'author')
+ author_name = list(author)[0]
+ author_uri = list(author)[1]
+ self.assertEqual(author_name.tag.split('}')[1], 'name')
+ self.assertEqual(author_name.text, 'Rackspace')
+ self.assertEqual(author_uri.tag.split('}')[1], 'uri')
+ self.assertEqual(author_uri.text, 'http://www.rackspace.com/')
+
+ self.assertEqual(link.get('href'),
+ 'http://localhost/v1.1/')
+ self.assertEqual(link.get('rel'), 'self')
+
+ self.assertEqual(entry.tag.split('}')[1], 'entry')
+ entry_children = list(entry)
+ entry_id = entry_children[0]
+ entry_title = entry_children[1]
+ entry_updated = entry_children[2]
+ entry_links = (entry_children[3], entry_children[4], entry_children[5])
+ entry_content = entry_children[6]
+
+ self.assertEqual(entry_id.tag.split('}')[1], "id")
+ self.assertEqual(entry_id.text,
+ "http://localhost/v1.1/")
+ self.assertEqual(entry_title.tag.split('}')[1], "title")
+ self.assertEqual(entry_title.get('type'), "text")
+ self.assertEqual(entry_title.text, "Version v1.1")
+ self.assertEqual(entry_updated.tag.split('}')[1], "updated")
+ self.assertEqual(entry_updated.text, "2011-01-21T11:33:21Z")
+
+ for i, link in enumerate(versions_data["version"]["links"]):
+ self.assertEqual(entry_links[i].tag.split('}')[1], "link")
+ for key, val in versions_data["version"]["links"][i].items():
+ self.assertEqual(entry_links[i].get(key), val)
+
+ self.assertEqual(entry_content.tag.split('}')[1], "content")
+ self.assertEqual(entry_content.get('type'), "text")
+ self.assertEqual(entry_content.text,
+ "Version v1.1 CURRENT (2011-01-21T11:33:21Z)")
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index 6a6e13d93..3deb844aa 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -95,31 +95,15 @@ def zone_select(context, specs):
class ZonesTest(test.TestCase):
def setUp(self):
super(ZonesTest, self).setUp()
- self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.reset_fake_data()
- fakes.FakeAuthDatabase.data = {}
+ self.flags(allow_admin_api=True)
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_auth(self.stubs)
-
- self.allow_admin = FLAGS.allow_admin_api
- FLAGS.allow_admin_api = True
self.stubs.Set(nova.db, 'zone_get', zone_get)
self.stubs.Set(nova.db, 'zone_update', zone_update)
self.stubs.Set(nova.db, 'zone_create', zone_create)
self.stubs.Set(nova.db, 'zone_delete', zone_delete)
- self.old_zone_name = FLAGS.zone_name
- self.old_zone_capabilities = FLAGS.zone_capabilities
-
- def tearDown(self):
- self.stubs.UnsetAll()
- FLAGS.allow_admin_api = self.allow_admin
- FLAGS.zone_name = self.old_zone_name
- FLAGS.zone_capabilities = self.old_zone_capabilities
- super(ZonesTest, self).tearDown()
-
def test_get_zone_list_scheduler(self):
self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler)
req = webob.Request.blank('/v1.0/zones')
@@ -190,8 +174,8 @@ class ZonesTest(test.TestCase):
self.assertFalse('username' in res_dict['zone'])
def test_zone_info(self):
- FLAGS.zone_name = 'darksecret'
- FLAGS.zone_capabilities = ['cap1=a;b', 'cap2=c;d']
+ caps = ['cap1=a;b', 'cap2=c;d']
+ self.flags(zone_name='darksecret', zone_capabilities=caps)
self.stubs.Set(api, '_call_scheduler', zone_capabilities)
body = dict(zone=dict(username='zeb', password='sneaky'))
@@ -205,7 +189,8 @@ class ZonesTest(test.TestCase):
self.assertEqual(res_dict['zone']['cap2'], 'c;d')
def test_zone_select(self):
- FLAGS.build_plan_encryption_key = 'c286696d887c9aa0611bbb3e2025a45a'
+ key = 'c286696d887c9aa0611bbb3e2025a45a'
+ self.flags(build_plan_encryption_key=key)
self.stubs.Set(api, 'select', zone_select)
req = webob.Request.blank('/v1.0/zones/select')
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index aac3ff330..d51b19ccd 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -60,7 +60,10 @@ class FakeGlance(object):
'container_format': 'ovf'},
'image_data': StringIO.StringIO('')}}
- def __init__(self, host, port=None, use_ssl=False):
+ def __init__(self, host, port=None, use_ssl=False, auth_tok=None):
+ pass
+
+ def set_auth_token(self, auth_tok):
pass
def get_image_meta(self, image_id):
diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py
index 042819b9c..0ea196950 100644
--- a/nova/tests/hyperv_unittest.py
+++ b/nova/tests/hyperv_unittest.py
@@ -23,7 +23,6 @@ from nova import context
from nova import db
from nova import flags
from nova import test
-from nova.auth import manager
from nova.virt import hyperv
FLAGS = flags.FLAGS
@@ -34,11 +33,9 @@ class HyperVTestCase(test.TestCase):
"""Test cases for the Hyper-V driver"""
def setUp(self):
super(HyperVTestCase, self).setUp()
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.RequestContext(self.user, self.project)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
def test_create_destroy(self):
"""Create a VM and destroy it"""
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 223e7ae57..5a40f578f 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -31,6 +31,9 @@ class StubGlanceClient(object):
self.add_response = add_response
self.update_response = update_response
+ def set_auth_token(self, auth_tok):
+ pass
+
def get_image_meta(self, image_id):
return self.images[image_id]
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 4e8e85c7b..67b3c485a 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -305,5 +305,6 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# Cleanup
self._delete_server(server_id)
+
if __name__ == "__main__":
unittest.main()
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index daea826fd..6a56a57db 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -23,7 +23,6 @@ import datetime
import mox
import novaclient.exceptions
import stubout
-import webob
from mox import IgnoreArg
from nova import context
@@ -34,12 +33,10 @@ from nova import service
from nova import test
from nova import rpc
from nova import utils
-from nova.auth import manager as auth_manager
from nova.scheduler import api
from nova.scheduler import manager
from nova.scheduler import driver
from nova.compute import power_state
-from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
@@ -250,23 +247,17 @@ class SimpleDriverTestCase(test.TestCase):
volume_driver='nova.volume.driver.FakeISCSIDriver',
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
self.scheduler = manager.SchedulerManager()
- self.manager = auth_manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake')
- self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.get_admin_context()
-
- def tearDown(self):
- self.manager.delete_user(self.user)
- self.manager.delete_project(self.project)
- super(SimpleDriverTestCase, self).tearDown()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user.id
- inst['project_id'] = self.project.id
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
inst['instance_type_id'] = '1'
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0
@@ -485,11 +476,6 @@ class SimpleDriverTestCase(test.TestCase):
self.assertEqual(host, 'host2')
volume1.delete_volume(self.context, volume_id1)
db.volume_destroy(self.context, volume_id2)
- dic = {'service_id': s_ref['id'],
- 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
- 'vcpus_used': 16, 'memory_mb_used': 12, 'local_gb_used': 10,
- 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
- 'cpu_info': ''}
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
index d74b71fb6..7833028c3 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_zone_aware_scheduler.py
@@ -16,6 +16,8 @@
Tests For Zone Aware Scheduler.
"""
+import json
+
import nova.db
from nova import exception
@@ -327,3 +329,19 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
sched._provision_resource_from_blob(None, request_spec, 1,
request_spec, {})
self.assertTrue(was_called)
+
+ def test_decrypt_blob(self):
+ """Test that the decrypt method works."""
+
+ fixture = FakeZoneAwareScheduler()
+ test_data = {"foo": "bar"}
+
+ class StubDecryptor(object):
+ def decryptor(self, key):
+ return lambda blob: blob
+
+ self.stubs.Set(zone_aware_scheduler, 'crypto',
+ StubDecryptor())
+
+ self.assertEqual(fixture._decrypt_blob(test_data),
+ json.dumps(test_data))
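The new test_decrypt_blob test replaces the scheduler module's crypto reference with a stub whose decryptor(key) is the identity function, so it can assert that _decrypt_blob(test_data) comes back as json.dumps(test_data). The same stubout pattern in isolation (illustrative sketch only; FakeCrypto and fake_module are hypothetical stand-ins, while stubout is the package these tests already import):

    import json
    import stubout

    class FakeCrypto(object):
        def decryptor(self, key):
            # identity "decryption": hand the blob back unchanged
            return lambda blob: blob

    class fake_module(object):
        crypto = None  # stand-in for the module attribute being stubbed

    stubs = stubout.StubOutForTesting()
    stubs.Set(fake_module, 'crypto', FakeCrypto())
    try:
        blob = json.dumps({"foo": "bar"})
        assert fake_module.crypto.decryptor('key')(blob) == blob
    finally:
        stubs.UnsetAll()  # restore the original attribute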
diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py
index e170ccee6..3b54fc249 100644
--- a/nova/tests/test_access.py
+++ b/nova/tests/test_access.py
@@ -16,7 +16,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import webob
from nova import context
@@ -41,7 +40,7 @@ class FakeApiRequest(object):
class AccessTestCase(test.TestCase):
def _env_for(self, ctxt, action):
env = {}
- env['ec2.context'] = ctxt
+ env['nova.context'] = ctxt
env['ec2.request'] = FakeApiRequest(action)
return env
@@ -93,7 +92,11 @@ class AccessTestCase(test.TestCase):
super(AccessTestCase, self).tearDown()
def response_status(self, user, methodName):
- ctxt = context.RequestContext(user, self.project)
+ roles = manager.AuthManager().get_active_roles(user, self.project)
+ ctxt = context.RequestContext(user.id,
+ self.project.id,
+ is_admin=user.is_admin(),
+ roles=roles)
environ = self._env_for(ctxt, methodName)
req = webob.Request.blank('/', environ)
resp = req.get_response(self.mw)
@@ -105,30 +108,26 @@ class AccessTestCase(test.TestCase):
def shouldDeny(self, user, methodName):
self.assertEqual(401, self.response_status(user, methodName))
- def test_001_allow_all(self):
+ def test_allow_all(self):
users = [self.testadmin, self.testpmsys, self.testnet, self.testsys]
for user in users:
self.shouldAllow(user, '_allow_all')
- def test_002_allow_none(self):
+ def test_allow_none(self):
self.shouldAllow(self.testadmin, '_allow_none')
users = [self.testpmsys, self.testnet, self.testsys]
for user in users:
self.shouldDeny(user, '_allow_none')
- def test_003_allow_project_manager(self):
+ def test_allow_project_manager(self):
for user in [self.testadmin, self.testpmsys]:
self.shouldAllow(user, '_allow_project_manager')
for user in [self.testnet, self.testsys]:
self.shouldDeny(user, '_allow_project_manager')
- def test_004_allow_sys_and_net(self):
+ def test_allow_sys_and_net(self):
for user in [self.testadmin, self.testnet, self.testsys]:
self.shouldAllow(user, '_allow_sys_and_net')
# denied because it doesn't have the per project sysadmin
for user in [self.testpmsys]:
self.shouldDeny(user, '_allow_sys_and_net')
-
-if __name__ == "__main__":
- # TODO: Implement use_fake as an option
- unittest.main()
diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py
index 877cf4ea1..06cc498ac 100644
--- a/nova/tests/test_adminapi.py
+++ b/nova/tests/test_adminapi.py
@@ -25,7 +25,6 @@ from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
-from nova.auth import manager
from nova.api.ec2 import admin
from nova.image import fake
@@ -39,7 +38,7 @@ class AdminApiTestCase(test.TestCase):
super(AdminApiTestCase, self).setUp()
self.flags(connection_type='fake')
- self.conn = rpc.Connection.instance()
+ self.conn = rpc.create_connection()
# set up our cloud
self.api = admin.AdminController()
@@ -51,11 +50,11 @@ class AdminApiTestCase(test.TestCase):
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('admin', 'admin', 'admin', True)
- self.project = self.manager.create_project('proj', 'admin', 'proj')
- self.context = context.RequestContext(user=self.user,
- project=self.project)
+ self.user_id = 'admin'
+ self.project_id = 'admin'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ True)
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
@@ -73,11 +72,6 @@ class AdminApiTestCase(test.TestCase):
self.stubs.Set(rpc, 'cast', finish_cast)
- def tearDown(self):
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- super(AdminApiTestCase, self).tearDown()
-
def test_block_external_ips(self):
"""Make sure provider firewall rules are created."""
result = self.api.block_external_addresses(self.context, '1.1.1.1/32')
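A pattern repeated throughout the test changes in this commit: rather than creating real users and projects through nova.auth.manager.AuthManager in setUp() and deleting them in tearDown(), the tests now build a RequestContext directly from plain string ids. A condensed before/after sketch for orientation, mirroring the hunks above and below:

    from nova import context

    # old style (removed): AuthManager-backed user and project objects
    #   user = self.manager.create_user('admin', 'admin', 'admin', True)
    #   project = self.manager.create_project('proj', 'admin', 'proj')
    #   ctxt = context.RequestContext(user=user, project=project)

    # new style (added): string ids, with the admin flag passed positionally
    ctxt = context.RequestContext('admin', 'admin', True)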
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index fe7fd8402..d9b1d39c9 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -30,11 +30,11 @@ import webob
from nova import context
from nova import exception
from nova import test
+from nova import wsgi
from nova.api import ec2
from nova.api.ec2 import apirequest
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
-from nova.auth import manager
class FakeHttplibSocket(object):
@@ -192,10 +192,13 @@ class ApiEc2TestCase(test.TestCase):
"""Unit test for the cloud controller on an EC2 API"""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
- self.manager = manager.AuthManager()
self.host = '127.0.0.1'
- self.app = ec2.Authenticate(ec2.Requestify(ec2.Executor(),
- 'nova.api.ec2.cloud.CloudController'))
+ # NOTE(vish): skipping the Authorizer
+ roles = ['sysadmin', 'netadmin']
+ ctxt = context.RequestContext('fake', 'fake', roles=roles)
+ self.app = wsgi.InjectContext(ctxt,
+ ec2.Requestify(ec2.Authorizer(ec2.Executor()),
+ 'nova.api.ec2.cloud.CloudController'))
def expect_http(self, host=None, is_secure=False, api_version=None):
"""Returns a new EC2 connection"""
@@ -246,39 +249,25 @@ class ApiEc2TestCase(test.TestCase):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake')
- project = self.manager.create_project('fake', 'fake', 'fake')
-
# Any request should be fine
self.ec2.get_all_instances()
self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
'The version in the xmlns of the response does '
'not match the API version given in the request.')
- self.manager.delete_project(project)
- self.manager.delete_user(user)
-
def test_describe_instances(self):
"""Test that, after creating a user and a project, the describe
instances call to the API works properly"""
self.expect_http()
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake')
- project = self.manager.create_project('fake', 'fake', 'fake')
self.assertEqual(self.ec2.get_all_instances(), [])
- self.manager.delete_project(project)
- self.manager.delete_user(user)
def test_terminate_invalid_instance(self):
"""Attempt to terminate an invalid instance"""
self.expect_http()
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake')
- project = self.manager.create_project('fake', 'fake', 'fake')
self.assertRaises(EC2ResponseError, self.ec2.terminate_instances,
"i-00000005")
- self.manager.delete_project(project)
- self.manager.delete_user(user)
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
@@ -287,16 +276,12 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
- user = self.manager.create_user('fake', 'fake', 'fake')
- project = self.manager.create_project('fake', 'fake', 'fake')
# NOTE(vish): create depends on pool, so call helper directly
- cloud._gen_key(context.get_admin_context(), user.id, keyname)
+ cloud._gen_key(context.get_admin_context(), 'fake', keyname)
rv = self.ec2.get_all_key_pairs()
results = [k for k in rv if k.name == keyname]
self.assertEquals(len(results), 1)
- self.manager.delete_project(project)
- self.manager.delete_user(user)
def test_create_duplicate_key_pair(self):
"""Test that, after successfully generating a keypair,
@@ -305,8 +290,6 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
- user = self.manager.create_user('fake', 'fake', 'fake')
- project = self.manager.create_project('fake', 'fake', 'fake')
# NOTE(vish): create depends on pool, so call helper directly
self.ec2.create_key_pair('test')
@@ -325,27 +308,16 @@ class ApiEc2TestCase(test.TestCase):
"""Test that we can retrieve security groups"""
self.expect_http()
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
- project = self.manager.create_project('fake', 'fake', 'fake')
rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 1)
self.assertEquals(rv[0].name, 'default')
- self.manager.delete_project(project)
- self.manager.delete_user(user)
-
def test_create_delete_security_group(self):
"""Test that we can create a security group"""
self.expect_http()
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
- project = self.manager.create_project('fake', 'fake', 'fake')
-
- # At the moment, you need both of these to actually be netadmin
- self.manager.add_role('fake', 'netadmin')
- project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
@@ -364,9 +336,6 @@ class ApiEc2TestCase(test.TestCase):
self.ec2.delete_security_group(security_group_name)
- self.manager.delete_project(project)
- self.manager.delete_user(user)
-
def test_authorize_revoke_security_group_cidr(self):
"""
Test that we can add and remove CIDR based rules
@@ -374,12 +343,6 @@ class ApiEc2TestCase(test.TestCase):
"""
self.expect_http()
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake')
- project = self.manager.create_project('fake', 'fake', 'fake')
-
- # At the moment, you need both of these to actually be netadmin
- self.manager.add_role('fake', 'netadmin')
- project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
@@ -426,9 +389,6 @@ class ApiEc2TestCase(test.TestCase):
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
- self.manager.delete_project(project)
- self.manager.delete_user(user)
-
return
def test_authorize_revoke_security_group_cidr_v6(self):
@@ -438,12 +398,6 @@ class ApiEc2TestCase(test.TestCase):
"""
self.expect_http()
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake')
- project = self.manager.create_project('fake', 'fake', 'fake')
-
- # At the moment, you need both of these to actually be netadmin
- self.manager.add_role('fake', 'netadmin')
- project.add_role('fake', 'netadmin')
security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
for x in range(random.randint(4, 8)))
@@ -489,9 +443,6 @@ class ApiEc2TestCase(test.TestCase):
self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')
- self.manager.delete_project(project)
- self.manager.delete_user(user)
-
return
def test_authorize_revoke_security_group_foreign_group(self):
@@ -501,12 +452,6 @@ class ApiEc2TestCase(test.TestCase):
"""
self.expect_http()
self.mox.ReplayAll()
- user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
- project = self.manager.create_project('fake', 'fake', 'fake')
-
- # At the moment, you need both of these to actually be netadmin
- self.manager.add_role('fake', 'netadmin')
- project.add_role('fake', 'netadmin')
rand_string = 'sdiuisudfsdcnpaqwertasd'
security_group_name = "".join(random.choice(rand_string)
@@ -560,8 +505,3 @@ class ApiEc2TestCase(test.TestCase):
self.mox.ReplayAll()
self.ec2.delete_security_group(security_group_name)
-
- self.manager.delete_project(project)
- self.manager.delete_user(user)
-
- return
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 71e0d17c9..7c0f783bb 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -102,7 +102,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
self.assertEqual('classified', u.secret)
self.assertEqual('private-party', u.access)
- def test_004_signature_is_valid(self):
+ def test_signature_is_valid(self):
with user_generator(self.manager, name='admin', secret='admin',
access='admin'):
with project_generator(self.manager, name="admin",
@@ -141,15 +141,14 @@ class _AuthManagerBaseTestCase(test.TestCase):
'127.0.0.1',
'/services/Cloud'))
- def test_005_can_get_credentials(self):
- return
- credentials = self.manager.get_user('test1').get_credentials()
- self.assertEqual(credentials,
- 'export EC2_ACCESS_KEY="access"\n' +
- 'export EC2_SECRET_KEY="secret"\n' +
- 'export EC2_URL="http://127.0.0.1:8773/services/Cloud"\n' +
- 'export S3_URL="http://127.0.0.1:3333/"\n' +
- 'export EC2_USER_ID="test1"\n')
+ def test_can_get_credentials(self):
+ st = {'access': 'access', 'secret': 'secret'}
+ with user_and_project_generator(self.manager, user_state=st) as (u, p):
+ credentials = self.manager.get_environment_rc(u, p)
+ LOG.debug(credentials)
+ self.assertTrue('export EC2_ACCESS_KEY="access:testproj"\n'
+ in credentials)
+ self.assertTrue('export EC2_SECRET_KEY="secret"\n' in credentials)
def test_can_list_users(self):
with user_generator(self.manager):
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index e419e7a50..8c1a74c70 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -34,7 +34,6 @@ from nova import network
from nova import rpc
from nova import test
from nova import utils
-from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import fake
@@ -50,7 +49,7 @@ class CloudTestCase(test.TestCase):
self.flags(connection_type='fake',
stub_network=True)
- self.conn = rpc.Connection.instance()
+ self.conn = rpc.create_connection()
# set up our cloud
self.cloud = cloud.CloudController()
@@ -62,12 +61,11 @@ class CloudTestCase(test.TestCase):
self.volume = self.start_service('volume')
self.image_service = utils.import_object(FLAGS.image_service)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('admin', 'admin', 'admin', True)
- self.project = self.manager.create_project('proj', 'admin', 'proj')
- self.context = context.RequestContext(user=self.user,
- project=self.project)
- host = self.network.host
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ True)
def fake_show(meh, context, id):
return {'id': 1, 'container_format': 'ami',
@@ -87,17 +85,15 @@ class CloudTestCase(test.TestCase):
self.stubs.Set(rpc, 'cast', finish_cast)
def tearDown(self):
- networks = db.project_get_networks(self.context, self.project.id,
+ networks = db.project_get_networks(self.context, self.project_id,
associate=False)
for network in networks:
db.network_disassociate(self.context, network['id'])
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
super(CloudTestCase, self).tearDown()
def _create_key(self, name):
# NOTE(vish): create depends on pool, so just call helper directly
- return cloud._gen_key(self.context, self.context.user.id, name)
+ return cloud._gen_key(self.context, self.context.user_id, name)
def test_describe_regions(self):
"""Makes sure describe regions runs without raising an exception"""
@@ -326,22 +322,15 @@ class CloudTestCase(test.TestCase):
revoke = self.cloud.revoke_security_group_ingress
self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
- def test_revoke_security_group_ingress_by_id(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- sec = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
- authz(self.context, group_id=sec['id'], **kwargs)
- revoke = self.cloud.revoke_security_group_ingress
- self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
-
- def test_authorize_security_group_ingress_by_id(self):
+ def test_authorize_revoke_security_group_ingress_by_id(self):
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
authz = self.cloud.authorize_security_group_ingress
kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
- self.assertTrue(authz(self.context, group_id=sec['id'], **kwargs))
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
def test_authorize_security_group_ingress_missing_protocol_params(self):
sec = db.security_group_create(self.context,
@@ -961,21 +950,6 @@ class CloudTestCase(test.TestCase):
self._wait_for_running(ec2_instance_id)
return ec2_instance_id
- def test_rescue_unrescue_instance(self):
- instance_id = self._run_instance(
- image_id='ami-1',
- instance_type=FLAGS.default_instance_type,
- max_count=1)
- self.cloud.rescue_instance(context=self.context,
- instance_id=instance_id)
- # NOTE(vish): This currently does no validation, it simply makes sure
- # that the code path doesn't throw an exception.
- self.cloud.unrescue_instance(context=self.context,
- instance_id=instance_id)
- # TODO(soren): We need this until we can stop polling in the rpc code
- # for unit tests.
- self.cloud.terminate_instances(self.context, [instance_id])
-
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
@@ -1004,7 +978,7 @@ class CloudTestCase(test.TestCase):
key = RSA.load_key_string(private_key, callback=lambda: None)
bio = BIO.MemoryBuffer()
public_key = db.key_pair_get(self.context,
- self.context.user.id,
+ self.context.user_id,
'test')['public_key']
key.save_pub_key_bio(bio)
converted = crypto.ssl_pub_to_ssh_pub(bio.read())
@@ -1028,7 +1002,7 @@ class CloudTestCase(test.TestCase):
'mytestfprint')
self.assertTrue(result1)
keydata = db.key_pair_get(self.context,
- self.context.user.id,
+ self.context.user_id,
'testimportkey1')
self.assertEqual('mytestpubkey', keydata['public_key'])
self.assertEqual('mytestfprint', keydata['fingerprint'])
@@ -1045,7 +1019,7 @@ class CloudTestCase(test.TestCase):
dummypub)
self.assertTrue(result2)
keydata = db.key_pair_get(self.context,
- self.context.user.id,
+ self.context.user_id,
'testimportkey2')
self.assertEqual(dummypub, keydata['public_key'])
self.assertEqual(dummyfprint, keydata['fingerprint'])
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 0ede4f469..879e4b9cb 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -19,10 +19,6 @@
Tests For Compute
"""
-import mox
-import stubout
-
-from nova.auth import manager
from nova import compute
from nova.compute import instance_types
from nova.compute import manager as compute_manager
@@ -67,10 +63,9 @@ class ComputeTestCase(test.TestCase):
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake')
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.RequestContext('fake', 'fake', False)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
@@ -78,19 +73,14 @@ class ComputeTestCase(test.TestCase):
self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
- def tearDown(self):
- self.manager.delete_user(self.user)
- self.manager.delete_project(self.project)
- super(ComputeTestCase, self).tearDown()
-
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
- inst['user_id'] = self.user.id
- inst['project_id'] = self.project.id
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
@@ -115,8 +105,8 @@ class ComputeTestCase(test.TestCase):
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
- 'user_id': self.user.id,
- 'project_id': self.project.id}
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
return db.security_group_create(self.context, values)
def _get_dummy_instance(self):
@@ -350,8 +340,8 @@ class ComputeTestCase(test.TestCase):
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.create')
payload = msg['payload']
- self.assertEquals(payload['tenant_id'], self.project.id)
- self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['tenant_id'], self.project_id)
+ self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
@@ -374,8 +364,8 @@ class ComputeTestCase(test.TestCase):
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.delete')
payload = msg['payload']
- self.assertEquals(payload['tenant_id'], self.project.id)
- self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['tenant_id'], self.project_id)
+ self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
@@ -420,7 +410,7 @@ class ComputeTestCase(test.TestCase):
def fake(*args, **kwargs):
pass
- self.stubs.Set(self.compute.driver, 'finish_resize', fake)
+ self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
context = self.context.elevated()
instance_id = self._create_instance()
@@ -457,8 +447,8 @@ class ComputeTestCase(test.TestCase):
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
payload = msg['payload']
- self.assertEquals(payload['tenant_id'], self.project.id)
- self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['tenant_id'], self.project_id)
+ self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance_id)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
@@ -506,8 +496,8 @@ class ComputeTestCase(test.TestCase):
db.instance_update(self.context, instance_id,
{'instance_type_id': inst_type['id']})
- self.assertRaises(exception.ApiError, self.compute_api.resize,
- context, instance_id, 1)
+ self.assertRaises(exception.CannotResizeToSmallerSize,
+ self.compute_api.resize, context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
@@ -518,8 +508,8 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
- self.assertRaises(exception.ApiError, self.compute_api.resize,
- context, instance_id, 1)
+ self.assertRaises(exception.CannotResizeToSameSize,
+ self.compute_api.resize, context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
@@ -531,8 +521,8 @@ class ComputeTestCase(test.TestCase):
def fake(*args, **kwargs):
pass
- self.stubs.Set(self.compute.driver, 'finish_resize', fake)
- self.stubs.Set(self.compute.driver, 'revert_resize', fake)
+ self.stubs.Set(self.compute.driver, 'finish_migration', fake)
+ self.stubs.Set(self.compute.driver, 'revert_migration', fake)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
self.compute.run_instance(self.context, instance_id)
@@ -849,7 +839,6 @@ class ComputeTestCase(test.TestCase):
def test_run_kill_vm(self):
"""Detect when a vm is terminated behind the scenes"""
- self.stubs = stubout.StubOutForTesting()
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
index 1806cc1ea..cf7f592cf 100644
--- a/nova/tests/test_console.py
+++ b/nova/tests/test_console.py
@@ -26,10 +26,9 @@ from nova import exception
from nova import flags
from nova import test
from nova import utils
-from nova.auth import manager
-from nova.console import manager as console_manager
FLAGS = flags.FLAGS
+flags.DECLARE('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
@@ -39,17 +38,11 @@ class ConsoleTestCase(test.TestCase):
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
self.console = utils.import_object(FLAGS.console_manager)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake')
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.get_admin_context()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
self.host = 'test_compute_host'
- def tearDown(self):
- self.manager.delete_user(self.user)
- self.manager.delete_project(self.project)
- super(ConsoleTestCase, self).tearDown()
-
def _create_instance(self):
"""Create a test instance"""
inst = {}
@@ -58,8 +51,8 @@ class ConsoleTestCase(test.TestCase):
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
- inst['user_id'] = self.user.id
- inst['project_id'] = self.project.id
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
inst['instance_type_id'] = 1
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 107fd03e3..0c07cbb7c 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -22,7 +22,6 @@ from nova import test
from nova import context
from nova import db
from nova import flags
-from nova.auth import manager
FLAGS = flags.FLAGS
@@ -45,42 +44,35 @@ def _setup_networking(instance_id, ip='1.2.3.4', flo_addr='1.2.1.2'):
db.fixed_ip_create(ctxt, fixed_ip)
fix_ref = db.fixed_ip_get_by_address(ctxt, ip)
db.floating_ip_create(ctxt, {'address': flo_addr,
- 'fixed_ip_id': fix_ref.id})
+ 'fixed_ip_id': fix_ref['id']})
class DbApiTestCase(test.TestCase):
def setUp(self):
super(DbApiTestCase, self).setUp()
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('admin', 'admin', 'admin', True)
- self.project = self.manager.create_project('proj', 'admin', 'proj')
- self.context = context.RequestContext(user=self.user,
- project=self.project)
-
- def tearDown(self):
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- super(DbApiTestCase, self).tearDown()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
def test_instance_get_project_vpn(self):
- result = db.fixed_ip_get_all(self.context)
values = {'instance_type_id': FLAGS.default_instance_type,
'image_ref': FLAGS.vpn_image_id,
- 'project_id': self.project.id
+ 'project_id': self.project_id,
}
instance = db.instance_create(self.context, values)
- result = db.instance_get_project_vpn(self.context, self.project.id)
- self.assertEqual(instance.id, result.id)
+ result = db.instance_get_project_vpn(self.context.elevated(),
+ self.project_id)
+ self.assertEqual(instance['id'], result['id'])
def test_instance_get_project_vpn_joins(self):
- result = db.fixed_ip_get_all(self.context)
values = {'instance_type_id': FLAGS.default_instance_type,
'image_ref': FLAGS.vpn_image_id,
- 'project_id': self.project.id
+ 'project_id': self.project_id,
}
instance = db.instance_create(self.context, values)
- _setup_networking(instance.id)
- result = db.instance_get_project_vpn(self.context, self.project.id)
- self.assertEqual(instance.id, result.id)
+ _setup_networking(instance['id'])
+ result = db.instance_get_project_vpn(self.context.elevated(),
+ self.project_id)
+ self.assertEqual(instance['id'], result['id'])
self.assertEqual(result['fixed_ips'][0]['floating_ips'][0].address,
'1.2.1.2')
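Two conventions in this hunk are worth noting: results are subscripted (result['id']) rather than attribute-accessed, which works for both SQLAlchemy rows and plain dicts, and the VPN lookup now runs under an elevated copy of the request context. A brief sketch, assuming a Nova tree on the path and that elevated() returns an admin-capable copy of the context, as its use above suggests:

from nova import context

ctxt = context.RequestContext('fake', 'fake')
admin_ctxt = ctxt.elevated()      # same user/project ids, but admin rights

assert admin_ctxt.user_id == 'fake'
assert admin_ctxt.project_id == 'fake'
assert admin_ctxt.is_admin        # why instance_get_project_vpn now succeeds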
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index bab4d200b..cf25ce215 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -32,7 +32,6 @@ from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
-from nova.auth import manager
from nova.compute import power_state
from nova.virt.libvirt import connection
from nova.virt.libvirt import firewall
@@ -154,36 +153,15 @@ class LibvirtConnTestCase(test.TestCase):
super(LibvirtConnTestCase, self).setUp()
connection._late_load_cheetah()
self.flags(fake_call=True)
- self.manager = manager.AuthManager()
-
- try:
- pjs = self.manager.get_projects()
- pjs = [p for p in pjs if p.name == 'fake']
- if 0 != len(pjs):
- self.manager.delete_project(pjs[0])
-
- users = self.manager.get_users()
- users = [u for u in users if u.name == 'fake']
- if 0 != len(users):
- self.manager.delete_user(users[0])
- except Exception, e:
- pass
-
- users = self.manager.get_users()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
self.context = context.get_admin_context()
FLAGS.instances_path = ''
self.call_libvirt_dependant_setup = False
self.test_ip = '10.11.12.13'
- def tearDown(self):
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- super(LibvirtConnTestCase, self).tearDown()
-
test_instance = {'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
@@ -239,7 +217,7 @@ class LibvirtConnTestCase(test.TestCase):
'mac_address': 'fake',
'ip_address': 'fake',
'dhcp_server': 'fake',
- 'extra_params': 'fake'
+ 'extra_params': 'fake',
}
# Creating mocks
@@ -368,7 +346,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
- conn.snapshot(instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
self.assertEquals(snapshot['properties']['image_state'], 'available')
@@ -408,7 +386,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
- conn.snapshot(instance_ref, recv_meta['id'])
+ conn.snapshot(self.context, instance_ref, recv_meta['id'])
snapshot = image_service.show(context, recv_meta['id'])
self.assertEquals(snapshot['properties']['image_state'], 'available')
@@ -441,8 +419,8 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(parameters[1].get('value'), 'fake')
def _check_xml_and_container(self, instance):
- user_context = context.RequestContext(project=self.project,
- user=self.user)
+ user_context = context.RequestContext(self.user_id,
+ self.project_id)
instance_ref = db.instance_create(user_context, instance)
_setup_networking(instance_ref['id'], self.test_ip)
@@ -470,11 +448,10 @@ class LibvirtConnTestCase(test.TestCase):
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=False):
- user_context = context.RequestContext(project=self.project,
- user=self.user)
+ user_context = context.RequestContext(self.user_id, self.project_id)
instance_ref = db.instance_create(user_context, instance)
network_ref = db.project_get_networks(context.get_admin_context(),
- self.project.id)[0]
+ self.project_id)[0]
_setup_networking(instance_ref['id'], self.test_ip)
@@ -759,7 +736,7 @@ class LibvirtConnTestCase(test.TestCase):
network_info = _create_network_info()
try:
- conn.spawn(instance, network_info)
+ conn.spawn(self.context, instance, network_info)
except Exception, e:
count = (0 <= str(e.message).find('Unexpected method call'))
@@ -802,11 +779,9 @@ class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.RequestContext('fake', 'fake')
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
class FakeLibvirtConnection(object):
@@ -832,11 +807,6 @@ class IptablesFirewallTestCase(test.TestCase):
connection.libxml2 = __import__('libxml2')
return True
- def tearDown(self):
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- super(IptablesFirewallTestCase, self).tearDown()
-
in_nat_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
@@ -1119,11 +1089,9 @@ class NWFilterTestCase(test.TestCase):
class Mock(object):
pass
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.RequestContext(self.user, self.project)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
self.fake_libvirt_connection = Mock()
@@ -1131,11 +1099,6 @@ class NWFilterTestCase(test.TestCase):
self.fw = firewall.NWFilterFirewall(
lambda: self.fake_libvirt_connection)
- def tearDown(self):
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- super(NWFilterTestCase, self).tearDown()
-
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
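The libvirt hunks thread the request context through the virt driver: spawn and snapshot now take it as their first argument. A hedged sketch of the call shapes exercised above; the driver calls are shown as comments only, since constructing a LibvirtConnection needs a libvirt-capable environment:

from nova import context

ctxt = context.RequestContext('fake', 'fake')

# Signatures exercised by the tests above; the context comes first now:
#   conn = connection.LibvirtConnection(False)
#   conn.spawn(ctxt, instance, network_info)
#   conn.snapshot(ctxt, instance_ref, image_id)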
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index 39b4e18d7..0b2dce20e 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -21,8 +21,6 @@ Unittests for S3 objectstore clone.

"""
import boto
-import glob
-import hashlib
import os
import shutil
import tempfile
@@ -30,12 +28,9 @@ import tempfile
from boto import exception as boto_exception
from boto.s3 import connection as s3
-from nova import context
-from nova import exception
from nova import flags
from nova import wsgi
from nova import test
-from nova.auth import manager
from nova.objectstore import s3server
@@ -57,15 +52,9 @@ class S3APITestCase(test.TestCase):
def setUp(self):
"""Setup users, projects, and start a test server."""
super(S3APITestCase, self).setUp()
- self.flags(auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
- buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
+ self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
s3_host='127.0.0.1')
- self.auth_manager = manager.AuthManager()
- self.admin_user = self.auth_manager.create_user('admin', admin=True)
- self.admin_project = self.auth_manager.create_project('admin',
- self.admin_user)
-
shutil.rmtree(FLAGS.buckets_path)
os.mkdir(FLAGS.buckets_path)
@@ -80,8 +69,8 @@ class S3APITestCase(test.TestCase):
boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')
- conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
- aws_secret_access_key=self.admin_user.secret,
+ conn = s3.S3Connection(aws_access_key_id='fake',
+ aws_secret_access_key='fake',
host=FLAGS.s3_host,
port=FLAGS.s3_port,
is_secure=False,
@@ -104,11 +93,11 @@ class S3APITestCase(test.TestCase):
self.assertEquals(buckets[0].name, name, "Wrong name")
return True
- def test_000_list_buckets(self):
+ def test_list_buckets(self):
"""Make sure we are starting with no buckets."""
self._ensure_no_buckets(self.conn.get_all_buckets())
- def test_001_create_and_delete_bucket(self):
+ def test_create_and_delete_bucket(self):
"""Test bucket creation and deletion."""
bucket_name = 'testbucket'
@@ -117,7 +106,7 @@ class S3APITestCase(test.TestCase):
self.conn.delete_bucket(bucket_name)
self._ensure_no_buckets(self.conn.get_all_buckets())
- def test_002_create_bucket_and_key_and_delete_key_again(self):
+ def test_create_bucket_and_key_and_delete_key_again(self):
"""Test key operations on buckets."""
bucket_name = 'testbucket'
key_name = 'somekey'
@@ -146,8 +135,6 @@ class S3APITestCase(test.TestCase):
bucket_name)
def tearDown(self):
- """Tear down auth and test server."""
- self.auth_manager.delete_user('admin')
- self.auth_manager.delete_project('admin')
+ """Tear down test server."""
self.server.stop()
super(S3APITestCase, self).tearDown()
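With the auth manager gone, the S3 API test simply points boto at the local s3server with throwaway credentials. A hedged sketch of that client setup; the host and port values are illustrative, and the test itself reads them from FLAGS:

import boto
from boto.s3 import connection as s3

# Retries off so a dead test server fails fast, as in the setUp above.
if not boto.config.has_section('Boto'):
    boto.config.add_section('Boto')
boto.config.set('Boto', 'num_retries', '0')

conn = s3.S3Connection(aws_access_key_id='fake',
                       aws_secret_access_key='fake',
                       host='127.0.0.1',
                       port=3333,          # illustrative; tests use FLAGS.s3_port
                       is_secure=False,
                       calling_format=s3.OrdinaryCallingFormat())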
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index a35caadf8..92393b536 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -20,12 +20,9 @@ from nova import compute
from nova import context
from nova import db
from nova import flags
-from nova import network
from nova import quota
from nova import test
-from nova import utils
from nova import volume
-from nova.auth import manager
from nova.compute import instance_types
@@ -48,25 +45,20 @@ class QuotaTestCase(test.TestCase):
quota_gigabytes=20,
quota_floating_ips=1)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('admin', 'admin', 'admin', True)
- self.project = self.manager.create_project('admin', 'admin', 'admin')
self.network = self.start_service('network')
- self.context = context.RequestContext(project=self.project,
- user=self.user)
-
- def tearDown(self):
- manager.AuthManager().delete_project(self.project)
- manager.AuthManager().delete_user(self.user)
- super(QuotaTestCase, self).tearDown()
+ self.user_id = 'admin'
+ self.project_id = 'admin'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ True)
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user.id
- inst['project_id'] = self.project.id
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
return db.instance_create(self.context, inst)['id']
@@ -74,8 +66,8 @@ class QuotaTestCase(test.TestCase):
def _create_volume(self, size=10):
"""Create a test volume"""
vol = {}
- vol['user_id'] = self.user.id
- vol['project_id'] = self.project.id
+ vol['user_id'] = self.user_id
+ vol['project_id'] = self.project_id
vol['size'] = size
return db.volume_create(self.context, vol)['id']
@@ -95,15 +87,15 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 2)
- db.quota_create(self.context, self.project.id, 'instances', 10)
+ db.quota_create(self.context, self.project_id, 'instances', 10)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 4)
- db.quota_create(self.context, self.project.id, 'cores', 100)
+ db.quota_create(self.context, self.project_id, 'cores', 100)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 10)
- db.quota_create(self.context, self.project.id, 'ram', 3 * 2048)
+ db.quota_create(self.context, self.project_id, 'ram', 3 * 2048)
num_instances = quota.allowed_instances(self.context, 100,
self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 3)
@@ -113,13 +105,13 @@ class QuotaTestCase(test.TestCase):
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
- db.quota_create(self.context, self.project.id, 'metadata_items', 5)
+ db.quota_create(self.context, self.project_id, 'metadata_items', 5)
num_metadata_items = quota.allowed_metadata_items(self.context,
too_many_items)
self.assertEqual(num_metadata_items, 5)
# Cleanup
- db.quota_destroy_all_by_project(self.context, self.project.id)
+ db.quota_destroy_all_by_project(self.context, self.project_id)
def test_unlimited_instances(self):
FLAGS.quota_instances = 2
@@ -129,7 +121,7 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 2)
- db.quota_create(self.context, self.project.id, 'instances', None)
+ db.quota_create(self.context, self.project_id, 'instances', None)
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 100)
@@ -145,7 +137,7 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 2)
- db.quota_create(self.context, self.project.id, 'ram', None)
+ db.quota_create(self.context, self.project_id, 'ram', None)
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 100)
@@ -161,7 +153,7 @@ class QuotaTestCase(test.TestCase):
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 2)
- db.quota_create(self.context, self.project.id, 'cores', None)
+ db.quota_create(self.context, self.project_id, 'cores', None)
num_instances = quota.allowed_instances(self.context, 100,
instance_type)
self.assertEqual(num_instances, 100)
@@ -174,7 +166,7 @@ class QuotaTestCase(test.TestCase):
FLAGS.quota_gigabytes = -1
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 10)
- db.quota_create(self.context, self.project.id, 'volumes', None)
+ db.quota_create(self.context, self.project_id, 'volumes', None)
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 100)
volumes = quota.allowed_volumes(self.context, 101, 1)
@@ -185,7 +177,7 @@ class QuotaTestCase(test.TestCase):
FLAGS.quota_gigabytes = 10
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 10)
- db.quota_create(self.context, self.project.id, 'gigabytes', None)
+ db.quota_create(self.context, self.project_id, 'gigabytes', None)
volumes = quota.allowed_volumes(self.context, 100, 1)
self.assertEqual(volumes, 100)
volumes = quota.allowed_volumes(self.context, 101, 1)
@@ -195,7 +187,7 @@ class QuotaTestCase(test.TestCase):
FLAGS.quota_floating_ips = 10
floating_ips = quota.allowed_floating_ips(self.context, 100)
self.assertEqual(floating_ips, 10)
- db.quota_create(self.context, self.project.id, 'floating_ips', None)
+ db.quota_create(self.context, self.project_id, 'floating_ips', None)
floating_ips = quota.allowed_floating_ips(self.context, 100)
self.assertEqual(floating_ips, 100)
floating_ips = quota.allowed_floating_ips(self.context, 101)
@@ -205,7 +197,7 @@ class QuotaTestCase(test.TestCase):
FLAGS.quota_metadata_items = 10
items = quota.allowed_metadata_items(self.context, 100)
self.assertEqual(items, 10)
- db.quota_create(self.context, self.project.id, 'metadata_items', None)
+ db.quota_create(self.context, self.project_id, 'metadata_items', None)
items = quota.allowed_metadata_items(self.context, 100)
self.assertEqual(items, 100)
items = quota.allowed_metadata_items(self.context, 101)
@@ -273,11 +265,11 @@ class QuotaTestCase(test.TestCase):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
{'address': address,
- 'project_id': self.project.id})
+ 'project_id': self.project_id})
self.assertRaises(quota.QuotaError,
self.network.allocate_floating_ip,
self.context,
- self.project.id)
+ self.project_id)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_too_many_metadata_items(self):
@@ -299,7 +291,7 @@ class QuotaTestCase(test.TestCase):
def test_overridden_allowed_injected_files(self):
FLAGS.quota_max_injected_files = 5
- db.quota_create(self.context, self.project.id, 'injected_files', 77)
+ db.quota_create(self.context, self.project_id, 'injected_files', 77)
self.assertEqual(quota.allowed_injected_files(self.context, 100), 77)
def test_unlimited_default_allowed_injected_files(self):
@@ -308,7 +300,7 @@ class QuotaTestCase(test.TestCase):
def test_unlimited_db_allowed_injected_files(self):
FLAGS.quota_max_injected_files = 5
- db.quota_create(self.context, self.project.id, 'injected_files', None)
+ db.quota_create(self.context, self.project_id, 'injected_files', None)
self.assertEqual(quota.allowed_injected_files(self.context, 100), 100)
def test_default_allowed_injected_file_content_bytes(self):
@@ -318,7 +310,7 @@ class QuotaTestCase(test.TestCase):
def test_overridden_allowed_injected_file_content_bytes(self):
FLAGS.quota_max_injected_file_content_bytes = 12345
- db.quota_create(self.context, self.project.id,
+ db.quota_create(self.context, self.project_id,
'injected_file_content_bytes', 5678)
limit = quota.allowed_injected_file_content_bytes(self.context, 23456)
self.assertEqual(limit, 5678)
@@ -330,7 +322,7 @@ class QuotaTestCase(test.TestCase):
def test_unlimited_db_allowed_injected_file_content_bytes(self):
FLAGS.quota_max_injected_file_content_bytes = 12345
- db.quota_create(self.context, self.project.id,
+ db.quota_create(self.context, self.project_id,
'injected_file_content_bytes', None)
limit = quota.allowed_injected_file_content_bytes(self.context, 23456)
self.assertEqual(limit, 23456)
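The quota overrides are now keyed by the plain project_id string, and passing None as the limit means "unlimited", which is what the unlimited_* tests above rely on. A hedged sketch of the calls as they appear in the hunk; running it needs the test database that nova.test configures:

from nova import context
from nova import db

ctxt = context.RequestContext('admin', 'admin', True)   # admin context, as in setUp

# Per-project overrides; a None hard limit disables the cap entirely.
db.quota_create(ctxt, 'admin', 'instances', 10)
db.quota_create(ctxt, 'admin', 'cores', None)

# Remove every override for the project when the test is done.
db.quota_destroy_all_by_project(ctxt, 'admin')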
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index ffd748efe..2d2436175 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -33,11 +33,12 @@ LOG = logging.getLogger('nova.tests.rpc')
class RpcTestCase(test.TestCase):
def setUp(self):
super(RpcTestCase, self).setUp()
- self.conn = rpc.Connection.instance(True)
+ self.conn = rpc.create_connection(True)
self.receiver = TestReceiver()
- self.consumer = rpc.TopicAdapterConsumer(connection=self.conn,
- topic='test',
- proxy=self.receiver)
+ self.consumer = rpc.create_consumer(self.conn,
+ 'test',
+ self.receiver,
+ False)
self.consumer.attach_to_eventlet()
self.context = context.get_admin_context()
@@ -129,6 +130,8 @@ class RpcTestCase(test.TestCase):
"""Calls echo in the passed queue"""
LOG.debug(_("Nested received %(queue)s, %(value)s")
% locals())
+ # TODO: the nested call replays the caller's context, so the same
+ # request ID is reused; confirm that is the intended behaviour.
ret = rpc.call(context,
queue,
{"method": "echo",
@@ -137,10 +140,11 @@ class RpcTestCase(test.TestCase):
return value
nested = Nested()
- conn = rpc.Connection.instance(True)
- consumer = rpc.TopicAdapterConsumer(connection=conn,
- topic='nested',
- proxy=nested)
+ conn = rpc.create_connection(True)
+ consumer = rpc.create_consumer(conn,
+ 'nested',
+ nested,
+ False)
consumer.attach_to_eventlet()
value = 42
result = rpc.call(self.context,
@@ -149,47 +153,6 @@ class RpcTestCase(test.TestCase):
"value": value}})
self.assertEqual(value, result)
- def test_connectionpool_single(self):
- """Test that ConnectionPool recycles a single connection."""
- conn1 = rpc.ConnectionPool.get()
- rpc.ConnectionPool.put(conn1)
- conn2 = rpc.ConnectionPool.get()
- rpc.ConnectionPool.put(conn2)
- self.assertEqual(conn1, conn2)
-
- def test_connectionpool_double(self):
- """Test that ConnectionPool returns and reuses separate connections.
-
- When called consecutively we should get separate connections and upon
- returning them those connections should be reused for future calls
- before generating a new connection.
-
- """
- conn1 = rpc.ConnectionPool.get()
- conn2 = rpc.ConnectionPool.get()
-
- self.assertNotEqual(conn1, conn2)
- rpc.ConnectionPool.put(conn1)
- rpc.ConnectionPool.put(conn2)
-
- conn3 = rpc.ConnectionPool.get()
- conn4 = rpc.ConnectionPool.get()
- self.assertEqual(conn1, conn3)
- self.assertEqual(conn2, conn4)
-
- def test_connectionpool_limit(self):
- """Test connection pool limit and connection uniqueness."""
- max_size = FLAGS.rpc_conn_pool_size
- conns = []
-
- for i in xrange(max_size):
- conns.append(rpc.ConnectionPool.get())
-
- self.assertFalse(rpc.ConnectionPool.free_items)
- self.assertEqual(rpc.ConnectionPool.current_size,
- rpc.ConnectionPool.max_size)
- self.assertEqual(len(set(conns)), max_size)
-
class TestReceiver(object):
"""Simple Proxy class so the consumer has methods to call.
diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py
new file mode 100644
index 000000000..d29f7ae32
--- /dev/null
+++ b/nova/tests/test_rpc_amqp.py
@@ -0,0 +1,68 @@
+from nova import context
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova.rpc import amqp
+from nova import test
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.rpc')
+
+
+class RpcAMQPTestCase(test.TestCase):
+ def setUp(self):
+ super(RpcAMQPTestCase, self).setUp()
+ self.conn = rpc.create_connection(True)
+ self.receiver = TestReceiver()
+ self.consumer = rpc.create_consumer(self.conn,
+ 'test',
+ self.receiver,
+ False)
+ self.consumer.attach_to_eventlet()
+ self.context = context.get_admin_context()
+
+ def test_connectionpool_single(self):
+ """Test that ConnectionPool recycles a single connection."""
+ conn1 = amqp.ConnectionPool.get()
+ amqp.ConnectionPool.put(conn1)
+ conn2 = amqp.ConnectionPool.get()
+ amqp.ConnectionPool.put(conn2)
+ self.assertEqual(conn1, conn2)
+
+
+class TestReceiver(object):
+ """Simple Proxy class so the consumer has methods to call.
+
+ Uses static methods because we aren't actually storing any state.
+
+ """
+
+ @staticmethod
+ def echo(context, value):
+ """Simply returns whatever value is sent in."""
+ LOG.debug(_("Received %s"), value)
+ return value
+
+ @staticmethod
+ def context(context, value):
+ """Returns dictionary version of context."""
+ LOG.debug(_("Received %s"), context)
+ return context.to_dict()
+
+ @staticmethod
+ def echo_three_times(context, value):
+ context.reply(value)
+ context.reply(value + 1)
+ context.reply(value + 2)
+
+ @staticmethod
+ def echo_three_times_yield(context, value):
+ yield value
+ yield value + 1
+ yield value + 2
+
+ @staticmethod
+ def fail(context, value):
+ """Raises an exception with the value sent in."""
+ raise Exception(value)
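The pool test that moved into this new file asserts a simple recycle property: put a connection back and the next get returns the same object. A minimal, self-contained illustration of that behaviour; this is not Nova's ConnectionPool, just the idea the assertion captures:

class TinyPool(object):
    """Reuse returned items before creating new ones."""

    def __init__(self, create):
        self.create = create
        self.free_items = []

    def get(self):
        if self.free_items:
            return self.free_items.pop()
        return self.create()

    def put(self, item):
        self.free_items.append(item)


pool = TinyPool(create=object)
conn1 = pool.get()
pool.put(conn1)
conn2 = pool.get()
assert conn1 is conn2           # recycled, exactly what the test asserts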
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index f45f76b73..bbf47b50f 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -109,103 +109,8 @@ class ServiceTestCase(test.TestCase):
# the looping calls are created in StartService.
app = service.Service.create(host=host, binary=binary, topic=topic)
- self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
- service.rpc.Connection.instance(new=mox.IgnoreArg())
-
- self.mox.StubOutWithMock(rpc,
- 'TopicAdapterConsumer',
- use_mock_anything=True)
- self.mox.StubOutWithMock(rpc,
- 'FanoutAdapterConsumer',
- use_mock_anything=True)
-
- self.mox.StubOutWithMock(rpc,
- 'ConsumerSet',
- use_mock_anything=True)
-
- rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
- topic=topic,
- proxy=mox.IsA(service.Service)).AndReturn(
- rpc.TopicAdapterConsumer)
-
- rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
- topic='%s.%s' % (topic, host),
- proxy=mox.IsA(service.Service)).AndReturn(
- rpc.TopicAdapterConsumer)
-
- rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(),
- topic=topic,
- proxy=mox.IsA(service.Service)).AndReturn(
- rpc.FanoutAdapterConsumer)
-
- def wait_func(self, limit=None):
- return None
-
- mock_cset = self.mox.CreateMock(rpc.ConsumerSet,
- {'wait': wait_func})
- rpc.ConsumerSet(connection=mox.IgnoreArg(),
- consumer_list=mox.IsA(list)).AndReturn(mock_cset)
- wait_func(mox.IgnoreArg())
-
- service_create = {'host': host,
- 'binary': binary,
- 'topic': topic,
- 'report_count': 0,
- 'availability_zone': 'nova'}
- service_ref = {'host': host,
- 'binary': binary,
- 'report_count': 0,
- 'id': 1}
-
- service.db.service_get_by_args(mox.IgnoreArg(),
- host,
- binary).AndRaise(exception.NotFound())
- service.db.service_create(mox.IgnoreArg(),
- service_create).AndReturn(service_ref)
- self.mox.ReplayAll()
-
- app.start()
- app.stop()
self.assert_(app)
- # We're testing sort of weird behavior in how report_state decides
- # whether it is disconnected, it looks for a variable on itself called
- # 'model_disconnected' and report_state doesn't really do much so this
- # these are mostly just for coverage
- def test_report_state_no_service(self):
- host = 'foo'
- binary = 'bar'
- topic = 'test'
- service_create = {'host': host,
- 'binary': binary,
- 'topic': topic,
- 'report_count': 0,
- 'availability_zone': 'nova'}
- service_ref = {'host': host,
- 'binary': binary,
- 'topic': topic,
- 'report_count': 0,
- 'availability_zone': 'nova',
- 'id': 1}
-
- service.db.service_get_by_args(mox.IgnoreArg(),
- host,
- binary).AndRaise(exception.NotFound())
- service.db.service_create(mox.IgnoreArg(),
- service_create).AndReturn(service_ref)
- service.db.service_get(mox.IgnoreArg(),
- service_ref['id']).AndReturn(service_ref)
- service.db.service_update(mox.IgnoreArg(), service_ref['id'],
- mox.ContainsKeyValue('report_count', 1))
-
- self.mox.ReplayAll()
- serv = service.Service(host,
- binary,
- topic,
- 'nova.tests.test_service.FakeManager')
- serv.start()
- serv.report_state()
-
def test_report_state_newly_disconnected(self):
host = 'foo'
binary = 'bar'
@@ -276,81 +181,6 @@ class ServiceTestCase(test.TestCase):
self.assert_(not serv.model_disconnected)
- def test_compute_can_update_available_resource(self):
- """Confirm compute updates their record of compute-service table."""
- host = 'foo'
- binary = 'nova-compute'
- topic = 'compute'
-
- # Any mocks are not working without UnsetStubs() here.
- self.mox.UnsetStubs()
- ctxt = context.get_admin_context()
- service_ref = db.service_create(ctxt, {'host': host,
- 'binary': binary,
- 'topic': topic})
- serv = service.Service(host,
- binary,
- topic,
- 'nova.compute.manager.ComputeManager')
-
- # This testcase want to test calling update_available_resource.
- # No need to call periodic call, then below variable must be set 0.
- serv.report_interval = 0
- serv.periodic_interval = 0
-
- # Creating mocks
- self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
- service.rpc.Connection.instance(new=mox.IgnoreArg())
-
- self.mox.StubOutWithMock(rpc,
- 'TopicAdapterConsumer',
- use_mock_anything=True)
- self.mox.StubOutWithMock(rpc,
- 'FanoutAdapterConsumer',
- use_mock_anything=True)
-
- self.mox.StubOutWithMock(rpc,
- 'ConsumerSet',
- use_mock_anything=True)
-
- rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
- topic=topic,
- proxy=mox.IsA(service.Service)).AndReturn(
- rpc.TopicAdapterConsumer)
-
- rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
- topic='%s.%s' % (topic, host),
- proxy=mox.IsA(service.Service)).AndReturn(
- rpc.TopicAdapterConsumer)
-
- rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(),
- topic=topic,
- proxy=mox.IsA(service.Service)).AndReturn(
- rpc.FanoutAdapterConsumer)
-
- def wait_func(self, limit=None):
- return None
-
- mock_cset = self.mox.CreateMock(rpc.ConsumerSet,
- {'wait': wait_func})
- rpc.ConsumerSet(connection=mox.IgnoreArg(),
- consumer_list=mox.IsA(list)).AndReturn(mock_cset)
- wait_func(mox.IgnoreArg())
-
- self.mox.StubOutWithMock(serv.manager.driver,
- 'update_available_resource')
- serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)
-
- # Just doing start()-stop(), not confirm new db record is created,
- # because update_available_resource() works only in
- # libvirt environment. This testcase confirms
- # update_available_resource() is called. Otherwise, mox complains.
- self.mox.ReplayAll()
- serv.start()
- serv.stop()
-
- db.service_destroy(ctxt, service_ref['id'])
-
class TestWSGIService(test.TestCase):
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
index 35c838065..64f11fa45 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/test_test.py
@@ -33,8 +33,13 @@ class IsolationTestCase(test.TestCase):
self.start_service('compute')
def test_rpc_consumer_isolation(self):
- connection = rpc.Connection.instance(new=True)
- consumer = rpc.TopicAdapterConsumer(connection, topic='compute')
- consumer.register_callback(
- lambda x, y: self.fail('I should never be called'))
+ class NeverCalled(object):
+
+ def __getattribute__(*args):
+ assert False, "I should never get called."
+
+ connection = rpc.create_connection(new=True)
+ proxy = NeverCalled()
+ consumer = rpc.create_consumer(connection, 'compute',
+ proxy, fanout=False)
consumer.attach_to_eventlet()
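The isolation test replaces the old lambda callback with a proxy whose __getattribute__ always fails, so any dispatch at all trips an assertion. A standalone illustration of that fail-fast sentinel, purely for clarity:

class NeverCalled(object):
    """Any attribute access, including method lookup, fails loudly."""

    def __getattribute__(*args):
        assert False, "I should never get called."


sentinel = NeverCalled()
try:
    sentinel.anything          # even reading an attribute trips the assertion
except AssertionError, e:
    print "caught as expected: %s" % e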
diff --git a/nova/tests/test_twistd.py b/nova/tests/test_twistd.py
deleted file mode 100644
index ff8627c3b..000000000
--- a/nova/tests/test_twistd.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import StringIO
-import sys
-
-from nova import twistd
-from nova import exception
-from nova import flags
-from nova import test
-
-
-FLAGS = flags.FLAGS
-
-
-class TwistdTestCase(test.TestCase):
- def setUp(self):
- super(TwistdTestCase, self).setUp()
- self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions)
- sys.stdout = StringIO.StringIO()
-
- def tearDown(self):
- super(TwistdTestCase, self).tearDown()
- sys.stdout = sys.__stdout__
-
- def test_basic(self):
- options = self.Options()
- argv = options.parseOptions()
-
- def test_logfile(self):
- options = self.Options()
- argv = options.parseOptions(['--logfile=foo'])
- self.assertEqual(FLAGS.logfile, 'foo')
-
- def test_help(self):
- options = self.Options()
- self.assertRaises(SystemExit, options.parseOptions, ['--help'])
- self.assert_('pidfile' in sys.stdout.getvalue())
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 0c359e981..ec5098a37 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import os
import tempfile
@@ -306,3 +307,80 @@ class IsUUIDLikeTestCase(test.TestCase):
def test_non_uuid_string_passed(self):
val = 'foo-fooo'
self.assertUUIDLike(val, False)
+
+
+class ToPrimitiveTestCase(test.TestCase):
+ def test_list(self):
+ self.assertEquals(utils.to_primitive([1, 2, 3]), [1, 2, 3])
+
+ def test_empty_list(self):
+ self.assertEquals(utils.to_primitive([]), [])
+
+ def test_tuple(self):
+ self.assertEquals(utils.to_primitive((1, 2, 3)), [1, 2, 3])
+
+ def test_dict(self):
+ self.assertEquals(utils.to_primitive(dict(a=1, b=2, c=3)),
+ dict(a=1, b=2, c=3))
+
+ def test_empty_dict(self):
+ self.assertEquals(utils.to_primitive({}), {})
+
+ def test_datetime(self):
+ x = datetime.datetime(1, 2, 3, 4, 5, 6, 7)
+ self.assertEquals(utils.to_primitive(x), "0001-02-03 04:05:06.000007")
+
+ def test_iter(self):
+ class IterClass(object):
+ def __init__(self):
+ self.data = [1, 2, 3, 4, 5]
+ self.index = 0
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self.index == len(self.data):
+ raise StopIteration
+ self.index = self.index + 1
+ return self.data[self.index - 1]
+
+ x = IterClass()
+ self.assertEquals(utils.to_primitive(x), [1, 2, 3, 4, 5])
+
+ def test_iteritems(self):
+ class IterItemsClass(object):
+ def __init__(self):
+ self.data = dict(a=1, b=2, c=3).items()
+ self.index = 0
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self.index == len(self.data):
+ raise StopIteration
+ self.index = self.index + 1
+ return self.data[self.index - 1]
+
+ x = IterItemsClass()
+ ordered = utils.to_primitive(x)
+ ordered.sort()
+ self.assertEquals(ordered, [['a', 1], ['b', 2], ['c', 3]])
+
+ def test_instance(self):
+ class MysteryClass(object):
+ a = 10
+
+ def __init__(self):
+ self.b = 1
+
+ x = MysteryClass()
+ self.assertEquals(utils.to_primitive(x, convert_instances=True),
+ dict(b=1))
+
+ self.assertEquals(utils.to_primitive(x), x)
+
+ def test_typeerror(self):
+ x = bytearray # Class, not instance
+ self.assertEquals(utils.to_primitive(x), u"<type 'bytearray'>")
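The new tests pin down how to_primitive flattens arbitrary objects into JSON-friendly structures: tuples become lists, datetimes become strings, iterators are expanded, and instances are only converted when convert_instances is set. A simplified, self-contained stand-in showing those behaviours; this is not nova.utils.to_primitive, just a sketch of the same idea:

import datetime


def to_primitive_sketch(value, convert_instances=False):
    """Very small stand-in for the behaviour exercised by the tests."""
    if isinstance(value, (list, tuple)):
        return [to_primitive_sketch(v, convert_instances) for v in value]
    if isinstance(value, dict):
        return dict((k, to_primitive_sketch(v, convert_instances))
                    for k, v in value.iteritems())
    if isinstance(value, datetime.datetime):
        return str(value)
    if hasattr(value, 'iteritems'):
        return to_primitive_sketch(dict(value.iteritems()), convert_instances)
    if hasattr(value, '__iter__'):
        return to_primitive_sketch(list(value), convert_instances)
    if convert_instances and hasattr(value, '__dict__'):
        return to_primitive_sketch(value.__dict__, convert_instances)
    return value


assert to_primitive_sketch((1, 2, 3)) == [1, 2, 3]
assert to_primitive_sketch({'a': 1}) == {'a': 1}
assert to_primitive_sketch(datetime.datetime(1, 2, 3, 4, 5, 6, 7)).startswith('0001-02-03')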
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 7313508a6..06daf46e8 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -19,11 +19,11 @@
Test suite for VMWareAPI.
"""
+from nova import context
from nova import db
from nova import flags
from nova import test
from nova import utils
-from nova.auth import manager
from nova.compute import power_state
from nova.tests.glance import stubs as glance_stubs
from nova.tests.vmwareapi import db_fakes
@@ -40,13 +40,13 @@ class VMWareAPIVMTestCase(test.TestCase):
def setUp(self):
super(VMWareAPIVMTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake', False)
self.flags(vmwareapi_host_ip='test_url',
vmwareapi_host_username='test_username',
vmwareapi_host_password='test_pass')
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
self.network = utils.import_object(FLAGS.network_manager)
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
@@ -77,14 +77,12 @@ class VMWareAPIVMTestCase(test.TestCase):
def tearDown(self):
super(VMWareAPIVMTestCase, self).tearDown()
vmwareapi_fake.cleanup()
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
def _create_instance_in_the_db(self):
values = {'name': 1,
'id': 1,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
@@ -97,7 +95,7 @@ class VMWareAPIVMTestCase(test.TestCase):
"""Create and spawn the VM."""
self._create_instance_in_the_db()
self.type_data = db.instance_type_get_by_name(None, 'm1.large')
- self.conn.spawn(self.instance, self.network_info)
+ self.conn.spawn(self.context, self.instance, self.network_info)
self._check_vm_record()
def _check_vm_record(self):
@@ -159,14 +157,14 @@ class VMWareAPIVMTestCase(test.TestCase):
self._create_vm()
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
- self.conn.snapshot(self.instance, "Test-Snapshot")
+ self.conn.snapshot(self.context, self.instance, "Test-Snapshot")
info = self.conn.get_info(1)
self._check_vm_info(info, power_state.RUNNING)
def test_snapshot_non_existent(self):
self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.snapshot, self.instance,
- "Test-Snapshot")
+ self.assertRaises(Exception, self.conn.snapshot, self.context,
+ self.instance, "Test-Snapshot")
def test_reboot(self):
self._create_vm()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 8b3b5fa28..a795b3c74 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -30,7 +30,6 @@ from nova import flags
from nova import log as logging
from nova import test
from nova import utils
-from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova import exception
@@ -69,7 +68,9 @@ class XenAPIVolumeTestCase(test.TestCase):
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
- self.context = context.RequestContext('fake', 'fake', False)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
@@ -77,7 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase):
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.values = {'id': 1,
- 'project_id': 'fake',
+ 'project_id': self.project_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
@@ -173,10 +174,6 @@ class XenAPIVMTestCase(test.TestCase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
self.flags(xenapi_connection_url='test_url',
@@ -195,7 +192,9 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
- self.context = context.RequestContext('fake', 'fake', False)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
def test_parallel_builds(self):
@@ -227,10 +226,10 @@ class XenAPIVMTestCase(test.TestCase):
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
instance = db.instance_create(self.context, values)
- self.conn.spawn(instance, network_info)
+ self.conn.spawn(self.context, instance, network_info)
- gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
- gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
+ gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id)
+ gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id)
gt1.wait()
gt2.wait()
@@ -257,14 +256,15 @@ class XenAPIVMTestCase(test.TestCase):
instance = self._create_instance()
name = "MySnapshot"
- self.assertRaises(exception.Error, self.conn.snapshot, instance, name)
+ self.assertRaises(exception.Error, self.conn.snapshot,
+ self.context, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
- template_vm_ref = self.conn.snapshot(instance, name)
+ template_vm_ref = self.conn.snapshot(self.context, instance, name)
def ensure_vm_was_torn_down():
vm_labels = []
@@ -401,8 +401,8 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_loopingcall_start(self.stubs)
if create_record:
values = {'id': instance_id,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
@@ -426,14 +426,13 @@ class XenAPIVMTestCase(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
- self.conn.spawn(instance, network_info)
+ self.conn.spawn(self.context, instance, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_not_enough_memory(self):
- FLAGS.xenapi_image_service = 'glance'
self.assertRaises(Exception,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
@@ -445,7 +444,6 @@ class XenAPIVMTestCase(test.TestCase):
"""
vdi_recs_start = self._list_vdis()
- FLAGS.xenapi_image_service = 'glance'
stubs.stubout_fetch_image_glance_disk(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
@@ -460,7 +458,6 @@ class XenAPIVMTestCase(test.TestCase):
"""
vdi_recs_start = self._list_vdis()
- FLAGS.xenapi_image_service = 'glance'
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
@@ -468,22 +465,12 @@ class XenAPIVMTestCase(test.TestCase):
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
- def test_spawn_raw_objectstore(self):
- FLAGS.xenapi_image_service = 'objectstore'
- self._test_spawn(1, None, None)
-
- def test_spawn_objectstore(self):
- FLAGS.xenapi_image_service = 'objectstore'
- self._test_spawn(1, 2, 3)
-
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
@@ -512,20 +499,17 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEqual(len(self.vm['VBDs']), 1)
def test_spawn_vhd_glance_windows(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
- FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
@@ -551,7 +535,6 @@ class XenAPIVMTestCase(test.TestCase):
# Capture the sudo tee .../etc/network/interfaces command
(r'(sudo\s+)?tee.*interfaces', _tee_handler),
])
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
@@ -559,7 +542,6 @@ class XenAPIVMTestCase(test.TestCase):
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
- FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
@@ -604,7 +586,7 @@ class XenAPIVMTestCase(test.TestCase):
self.assertFalse(self._tee_executed)
def test_spawn_vlanmanager(self):
- self.flags(xenapi_image_service='glance',
+ self.flags(image_service='nova.image.glance.GlanceImageService',
network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
@@ -626,7 +608,7 @@ class XenAPIVMTestCase(test.TestCase):
host=FLAGS.host,
vpn=None,
instance_type_id=1,
- project_id=self.project.id)
+ project_id=self.project_id)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
@@ -648,7 +630,7 @@ class XenAPIVMTestCase(test.TestCase):
self.flags(flat_injected=False)
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
- conn.rescue(instance, None, [])
+ conn.rescue(self.context, instance, None, [])
def test_unrescue(self):
instance = self._create_instance()
@@ -656,21 +638,13 @@ class XenAPIVMTestCase(test.TestCase):
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(Exception, conn.unrescue, instance, None)
- def tearDown(self):
- super(XenAPIVMTestCase, self).tearDown()
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- self.vm_info = None
- self.vm = None
- self.stubs.UnsetAll()
-
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
stubs.stubout_loopingcall_start(self.stubs)
values = {
'id': instance_id,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
@@ -693,7 +667,7 @@ class XenAPIVMTestCase(test.TestCase):
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
if spawn:
- self.conn.spawn(instance, network_info)
+ self.conn.spawn(self.context, instance, network_info)
return instance
@@ -752,14 +726,12 @@ class XenAPIMigrateInstance(test.TestCase):
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.context = context.RequestContext('fake', 'fake', False)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
self.values = {'id': 1,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
+ 'project_id': self.project_id,
+ 'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
@@ -773,20 +745,83 @@ class XenAPIMigrateInstance(test.TestCase):
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
- def tearDown(self):
- super(XenAPIMigrateInstance, self).tearDown()
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- self.stubs.UnsetAll()
-
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.values)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(instance, '127.0.0.1')
- def test_finish_resize(self):
+ def test_finish_migrate(self):
+ instance = db.instance_create(self.context, self.values)
+ self.called = False
+
+ def fake_vdi_resize(*args, **kwargs):
+ self.called = True
+
+ self.stubs.Set(stubs.FakeSessionForMigrationTests,
+ "VDI_resize_online", fake_vdi_resize)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ stubs.stubout_loopingcall_start(self.stubs)
+ conn = xenapi_conn.get_connection(False)
+ network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00',
+ 'rxtx_cap': 3})]
+ conn.finish_migration(self.context, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, resize_instance=True)
+ self.assertEqual(self.called, True)
+
+ def test_finish_migrate_no_local_storage(self):
+ tiny_type_id = \
+ instance_types.get_instance_type_by_name('m1.tiny')['id']
+ self.values.update({'instance_type_id': tiny_type_id, 'local_gb': 0})
+ instance = db.instance_create(self.context, self.values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForMigrationTests,
+ "VDI_resize_online", fake_vdi_resize)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ stubs.stubout_loopingcall_start(self.stubs)
+ conn = xenapi_conn.get_connection(False)
+ network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00',
+ 'rxtx_cap': 3})]
+ conn.finish_migration(self.context, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, resize_instance=True)
+
+ def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.values)
+
+ def fake_vdi_resize(*args, **kwargs):
+ raise Exception("This shouldn't be called")
+
+ self.stubs.Set(stubs.FakeSessionForMigrationTests,
+ "VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
@@ -804,8 +839,27 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
- conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'),
- network_info)
+
+ # Resize instance would be determined by the compute call
+ conn.finish_migration(self.context, instance,
+ dict(base_copy='hurr', cow='durr'),
+ network_info, resize_instance=False)
+
+
+class XenAPIImageTypeTestCase(test.TestCase):
+ """Test ImageType class."""
+
+ def test_to_string(self):
+ """Can convert from type id to type string."""
+ self.assertEquals(
+ vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
+ vm_utils.ImageType.KERNEL_STR)
+
+ def test_from_string(self):
+ """Can convert from string to type id."""
+ self.assertEquals(
+ vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR),
+ vm_utils.ImageType.KERNEL)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
@@ -829,7 +883,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
def test_instance_disk(self):
"""If a kernel is specified, the image type is DISK (aka machine)."""
- FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
self.assert_disk_type(vm_utils.ImageType.DISK)
@@ -839,7 +892,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If the kernel isn't specified, and we're not using Glance, then
DISK_RAW is assumed.
"""
- FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -849,7 +901,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If we're using Glance, then defer to the image_type field, which in
this case will be 'raw'.
"""
- FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -859,7 +910,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If we're using Glance, then defer to the image_type field, which in
this case will be 'vhd'.
"""
- FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
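The xenapi resize path is renamed to finish_migration, taking the request context and an explicit resize_instance flag, and the new ImageType helpers round-trip between numeric type ids and string names. A hedged sketch of both; the import path is an assumption based on how the tests reference vm_utils, and the driver call is shown as a comment since it needs the stubbed XenAPI session the tests install:

from nova.virt.xenapi import vm_utils

# Round-trip between the numeric image type and its string form.
kernel_str = vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL)
assert vm_utils.ImageType.from_string(kernel_str) == vm_utils.ImageType.KERNEL

# New driver entry point exercised above (comment only; needs a stubbed session):
#   conn.finish_migration(ctxt, instance, dict(base_copy='x', cow='y'),
#                         network_info, resize_instance=True)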
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 66c79d465..0d0f84e32 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -28,8 +28,8 @@ from nova import utils
def stubout_instance_snapshot(stubs):
@classmethod
- def fake_fetch_image(cls, session, instance_id, image, user, project,
- type):
+ def fake_fetch_image(cls, context, session, instance_id, image, user,
+ project, type):
from nova.virt.xenapi.fake import create_vdi
name_label = "instance-%s" % instance_id
#TODO: create fake SR record
@@ -227,7 +227,7 @@ def stub_out_vm_methods(stubs):
def fake_release_bootlock(self, vm):
pass
- def fake_spawn_rescue(self, inst):
+ def fake_spawn_rescue(self, context, inst, network_info):
inst._rescue = False
stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown)
diff --git a/nova/twistd.py b/nova/twistd.py
deleted file mode 100644
index 15cf67825..000000000
--- a/nova/twistd.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
-manage pid files and support syslogging.
-"""
-
-import gflags
-import os
-import signal
-import sys
-import time
-from twisted.scripts import twistd
-from twisted.python import log
-from twisted.python import reflect
-from twisted.python import runtime
-from twisted.python import usage
-
-from nova import flags
-from nova import log as logging
-
-
-if runtime.platformType == "win32":
- from twisted.scripts._twistw import ServerOptions
-else:
- from twisted.scripts._twistd_unix import ServerOptions
-
-
-FLAGS = flags.FLAGS
-
-
-class TwistdServerOptions(ServerOptions):
- def parseArgs(self, *args):
- return
-
-
-class FlagParser(object):
- # this is a required attribute for gflags
- syntactic_help = ''
-
- def __init__(self, parser):
- self.parser = parser
-
- def Parse(self, s):
- return self.parser(s)
-
-
-def WrapTwistedOptions(wrapped):
- class TwistedOptionsToFlags(wrapped):
- subCommands = None
-
- def __init__(self):
- # NOTE(termie): _data exists because Twisted stuff expects
- # to be able to set arbitrary things that are
- # not actual flags
- self._data = {}
- self._flagHandlers = {}
- self._paramHandlers = {}
-
- # Absorb the twistd flags into our FLAGS
- self._absorbFlags()
- self._absorbParameters()
- self._absorbHandlers()
-
- wrapped.__init__(self)
-
- def _absorbFlags(self):
- twistd_flags = []
- reflect.accumulateClassList(self.__class__, 'optFlags',
- twistd_flags)
- for flag in twistd_flags:
- key = flag[0].replace('-', '_')
- if hasattr(FLAGS, key):
- continue
- flags.DEFINE_boolean(key, None, str(flag[-1]))
-
- def _absorbParameters(self):
- twistd_params = []
- reflect.accumulateClassList(self.__class__, 'optParameters',
- twistd_params)
- for param in twistd_params:
- key = param[0].replace('-', '_')
- if hasattr(FLAGS, key):
- continue
- if len(param) > 4:
- flags.DEFINE(FlagParser(param[4]),
- key, param[2], str(param[3]),
- serializer=gflags.ArgumentSerializer())
- else:
- flags.DEFINE_string(key, param[2], str(param[3]))
-
- def _absorbHandlers(self):
- twistd_handlers = {}
- reflect.addMethodNamesToDict(self.__class__, twistd_handlers,
- "opt_")
-
- # NOTE(termie): Much of the following is derived/copied from
- # twisted.python.usage with the express purpose of
- # providing compatibility
- for name in twistd_handlers.keys():
- method = getattr(self, 'opt_' + name)
-
- takesArg = not usage.flagFunction(method, name)
- doc = getattr(method, '__doc__', None)
- if not doc:
- doc = 'undocumented'
-
- if not takesArg:
- if name not in FLAGS:
- flags.DEFINE_boolean(name, None, doc)
- self._flagHandlers[name] = method
- else:
- if name not in FLAGS:
- flags.DEFINE_string(name, None, doc)
- self._paramHandlers[name] = method
-
- def _doHandlers(self):
- for flag, handler in self._flagHandlers.iteritems():
- if self[flag]:
- handler()
- for param, handler in self._paramHandlers.iteritems():
- if self[param] is not None:
- handler(self[param])
-
- def __str__(self):
- return str(FLAGS)
-
- def parseOptions(self, options=None):
- if options is None:
- options = sys.argv
- else:
- options.insert(0, '')
-
- args = FLAGS(options)
- logging.setup()
- argv = args[1:]
- # ignore subcommands
-
- try:
- self.parseArgs(*argv)
- except TypeError:
- raise usage.UsageError(_("Wrong number of arguments."))
-
- self.postOptions()
- return args
-
- def parseArgs(self, *args):
- # TODO(termie): figure out a decent way of dealing with args
- #return
- wrapped.parseArgs(self, *args)
-
- def postOptions(self):
- self._doHandlers()
-
- wrapped.postOptions(self)
-
- def __getitem__(self, key):
- key = key.replace('-', '_')
- try:
- return getattr(FLAGS, key)
- except (AttributeError, KeyError):
- return self._data[key]
-
- def __setitem__(self, key, value):
- key = key.replace('-', '_')
- try:
- return setattr(FLAGS, key, value)
- except (AttributeError, KeyError):
- self._data[key] = value
-
- def get(self, key, default):
- key = key.replace('-', '_')
- try:
- return getattr(FLAGS, key)
- except (AttributeError, KeyError):
- self._data.get(key, default)
-
- return TwistedOptionsToFlags
-
-
-def stop(pidfile):
- """
- Stop the daemon
- """
- # Get the pid from the pidfile
- try:
- pf = file(pidfile, 'r')
- pid = int(pf.read().strip())
- pf.close()
- except IOError:
- pid = None
-
- if not pid:
- message = _("pidfile %s does not exist. Daemon not running?\n")
- sys.stderr.write(message % pidfile)
- # Not an error in a restart
- return
-
- # Try killing the daemon process
- try:
- while 1:
- os.kill(pid, signal.SIGKILL)
- time.sleep(0.1)
- except OSError, err:
- err = str(err)
- if err.find(_("No such process")) > 0:
- if os.path.exists(pidfile):
- os.remove(pidfile)
- else:
- print str(err)
- sys.exit(1)
-
-
-def serve(filename):
- logging.debug(_("Serving %s") % filename)
- name = os.path.basename(filename)
- OptionsClass = WrapTwistedOptions(TwistdServerOptions)
- options = OptionsClass()
- argv = options.parseOptions()
- FLAGS.python = filename
- FLAGS.no_save = True
- if not FLAGS.pidfile:
- FLAGS.pidfile = '%s.pid' % name
- elif FLAGS.pidfile.endswith('twistd.pid'):
- FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
- if not FLAGS.prefix:
- FLAGS.prefix = name
- elif FLAGS.prefix.endswith('twisted'):
- FLAGS.prefix = FLAGS.prefix.replace('twisted', name)
-
- action = 'start'
- if len(argv) > 1:
- action = argv.pop()
-
- if action == 'stop':
- stop(FLAGS.pidfile)
- sys.exit()
- elif action == 'restart':
- stop(FLAGS.pidfile)
- elif action == 'start':
- pass
- else:
- print 'usage: %s [options] [start|stop|restart]' % argv[0]
- sys.exit(1)
-
- logging.debug(_("Full set of FLAGS:"))
- for flag in FLAGS:
- logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
-
- logging.audit(_("Starting %s"), name)
- twistd.runApp(options)
diff --git a/nova/utils.py b/nova/utils.py
index 8784a227d..4ea623cc1 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -19,7 +19,6 @@
"""Utilities and helper functions."""
-import base64
import datetime
import functools
import inspect
@@ -30,7 +29,6 @@ import os
import random
import re
import socket
-import string
import struct
import sys
import time
@@ -50,7 +48,8 @@ from nova import version
LOG = logging.getLogger("nova.utils")
-TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
FLAGS = flags.FLAGS
@@ -361,16 +360,26 @@ def clear_time_override():
utcnow.override_time = None
-def isotime(at=None):
- """Returns iso formatted utcnow."""
+def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
+ """Returns formatted utcnow."""
if not at:
at = utcnow()
- return at.strftime(TIME_FORMAT)
+ return at.strftime(fmt)
+
+
+def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
+ """Turn a formatted time back into a datetime."""
+ return datetime.datetime.strptime(timestr, fmt)
+
+
+def isotime(at=None):
+ """Returns iso formatted utcnow."""
+ return strtime(at, ISO_TIME_FORMAT)
def parse_isotime(timestr):
"""Turn an iso formatted time back into a datetime."""
- return datetime.datetime.strptime(timestr, TIME_FORMAT)
+ return parse_strtime(timestr, ISO_TIME_FORMAT)
def parse_mailmap(mailmap='.mailmap'):
@@ -504,25 +513,61 @@ def utf8(value):
return value
-def to_primitive(value):
- if type(value) is type([]) or type(value) is type((None,)):
- o = []
- for v in value:
- o.append(to_primitive(v))
- return o
- elif type(value) is type({}):
- o = {}
- for k, v in value.iteritems():
- o[k] = to_primitive(v)
- return o
- elif isinstance(value, datetime.datetime):
- return str(value)
- elif hasattr(value, 'iteritems'):
- return to_primitive(dict(value.iteritems()))
- elif hasattr(value, '__iter__'):
- return to_primitive(list(value))
- else:
- return value
+def to_primitive(value, convert_instances=False, level=0):
+ """Convert a complex object into primitives.
+
+ Handy for JSON serialization. We can optionally handle instances,
+ but since this is a recursive function, cyclical data structures
+ are a risk.
+
+ To handle cyclical data structures we could track the actual objects
+ visited in a set, but not all objects are hashable. Instead we just
+ track the depth of the object inspections and don't go too deep.
+
+ Therefore, convert_instances=True is lossy ... be aware.
+
+ """
+ if inspect.isclass(value):
+ return unicode(value)
+
+ if level > 3:
+ return []
+
+ # The try block may not be necessary after the class check above,
+ # but just in case ...
+ try:
+ if type(value) is type([]) or type(value) is type((None,)):
+ o = []
+ for v in value:
+ o.append(to_primitive(v, convert_instances=convert_instances,
+ level=level))
+ return o
+ elif type(value) is type({}):
+ o = {}
+ for k, v in value.iteritems():
+ o[k] = to_primitive(v, convert_instances=convert_instances,
+ level=level)
+ return o
+ elif isinstance(value, datetime.datetime):
+ return str(value)
+ elif hasattr(value, 'iteritems'):
+ return to_primitive(dict(value.iteritems()),
+ convert_instances=convert_instances,
+ level=level)
+ elif hasattr(value, '__iter__'):
+ return to_primitive(list(value),
+ convert_instances=convert_instances,
+ level=level)
+ elif convert_instances and hasattr(value, '__dict__'):
+ # Likely an instance of something. Watch for cycles.
+ # Ignore class member vars.
+ return to_primitive(value.__dict__,
+ convert_instances=convert_instances,
+ level=level + 1)
+ else:
+ return value
+ except TypeError, e:
+ # Class objects are tricky since they may define something like
+ # __iter__ that is not actually callable via list(value).
+ return unicode(value)
def dumps(value):
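
The new strtime/parse_strtime pair round-trips timestamps at microsecond precision (PERFECT_TIME_FORMAT), while isotime/parse_isotime keep the old second-granularity wire format. A standalone sketch of both round-trips, using only the stdlib and the same format strings as above:

import datetime

ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"

now = datetime.datetime(2011, 8, 3, 16, 39, 48, 123456)

# PERFECT_TIME_FORMAT keeps microseconds, so the round-trip is exact.
s = now.strftime(PERFECT_TIME_FORMAT)
assert datetime.datetime.strptime(s, PERFECT_TIME_FORMAT) == now

# ISO_TIME_FORMAT truncates to whole seconds, so only that much survives.
iso = now.strftime(ISO_TIME_FORMAT)
assert datetime.datetime.strptime(iso, ISO_TIME_FORMAT) == now.replace(microsecond=0)
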
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 34dc5f544..4f3cfefad 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -40,6 +40,7 @@ class ComputeDriver(object):
def init_host(self, host):
"""Adopt existing VM's running here"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_info(self, instance_name):
@@ -52,16 +53,20 @@ class ComputeDriver(object):
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instances(self):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instances_detail(self):
"""Return a list of InstanceInfo for all registered VMs"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
"""Launch a VM for the specified instance"""
raise NotImplementedError()
@@ -79,29 +84,36 @@ class ComputeDriver(object):
warning in that case.
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, instance, network_info):
"""Reboot specified VM"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def snapshot_instance(self, context, instance_id, image_id):
raise NotImplementedError()
def get_console_pool_info(self, console_type):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_output(self, instance):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_ajax_console(self, instance):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_ip_addr(self):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, context, instance_id, volume_id, mountpoint):
@@ -116,42 +128,50 @@ class ComputeDriver(object):
def migrate_disk_and_power_off(self, instance, dest):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def snapshot(self, instance, image_id):
+ def snapshot(self, context, instance, image_id):
"""Create snapshot from a running VM instance."""
raise NotImplementedError()
- def finish_resize(self, instance, disk_info):
+ def finish_migration(self, context, instance, disk_info, network_info,
+ resize_instance):
"""Completes a resize, turning on the migrated instance"""
raise NotImplementedError()
- def revert_resize(self, instance):
+ def revert_migration(self, instance):
"""Reverts a resize, powering back on the instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance, callback):
"""Pause VM instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance, callback):
"""Unpause paused VM instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance, callback):
"""suspend the specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, callback):
"""resume the specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, callback, network_info):
"""Rescue the specified instance"""
raise NotImplementedError()
def unrescue(self, instance, callback, network_info):
"""Unrescue the specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def update_available_resource(self, ctxt, host):
@@ -164,6 +184,7 @@ class ComputeDriver(object):
:param host: hostname that compute manager is currently running
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
@@ -183,20 +204,25 @@ class ComputeDriver(object):
expected nova.compute.manager.recover_live_migration.
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self, security_group_id):
"""See: nova/virt/fake.py for docs."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref):
@@ -222,10 +248,12 @@ class ComputeDriver(object):
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
@@ -236,24 +264,30 @@ class ComputeDriver(object):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def agent_update(self, instance, url, md5hash):
"""Update agent on the VM instance."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plugs in VIFs to networks."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
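
The interface change above threads the request context through spawn(), snapshot(), rescue() and finish_migration() so drivers can reach context.auth_token. A minimal sketch of a driver picking up the new signatures; the base class is stubbed here and NoopDriver is purely illustrative:

class ComputeDriver(object):
    """Stub of the two re-signed methods, just enough to subclass."""
    def spawn(self, context, instance, network_info,
              block_device_mapping=None):
        raise NotImplementedError()

    def snapshot(self, context, instance, image_id):
        raise NotImplementedError()


class NoopDriver(ComputeDriver):
    """Records the auth token handed down with each call."""
    def __init__(self):
        self.seen_tokens = []

    def spawn(self, context, instance, network_info,
              block_device_mapping=None):
        # The context now travels with the call, so image fetches can
        # authenticate against Glance using its auth_token.
        self.seen_tokens.append(getattr(context, 'auth_token', None))

    def snapshot(self, context, instance, image_id):
        self.seen_tokens.append(getattr(context, 'auth_token', None))
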
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 26bc421c0..80abcc644 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -129,7 +129,8 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance))
return info_list
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -153,7 +154,7 @@ class FakeConnection(driver.ComputeDriver):
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
- def snapshot(self, instance, name):
+ def snapshot(self, context, instance, name):
"""
Snapshots the specified instance.
@@ -240,7 +241,7 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, callback, network_info):
"""
Rescue the specified instance.
"""
@@ -340,8 +341,7 @@ class FakeConnection(driver.ComputeDriver):
only useful for giving back to this layer as a parameter to
disk_stats). These IDs only need to be unique for a given instance.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return ['A_DISK']
@@ -353,8 +353,7 @@ class FakeConnection(driver.ComputeDriver):
interface_stats). These IDs only need to be unique for a given
instance.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return ['A_VIF']
@@ -374,8 +373,7 @@ class FakeConnection(driver.ComputeDriver):
having to do the aggregation. On those platforms, this method is
unused.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return [0L, 0L, 0L, 0L, None]
@@ -395,8 +393,7 @@ class FakeConnection(driver.ComputeDriver):
having to do the aggregation. On those platforms, this method is
unused.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 81c7dea58..3428a7fc1 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -66,7 +66,6 @@ import time
from nova import exception
from nova import flags
from nova import log as logging
-from nova.auth import manager
from nova.compute import power_state
from nova.virt import driver
from nova.virt import images
@@ -139,19 +138,19 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
raise exception.InstanceExists(name=instance.name)
- user = manager.AuthManager().get_user(instance['user_id'])
- project = manager.AuthManager().get_project(instance['project_id'])
#Fetch the file, assume it is a VHD file.
base_vhd_filename = os.path.join(FLAGS.instances_path,
instance.name)
vhdfile = "%s.vhd" % (base_vhd_filename)
- images.fetch(instance['image_ref'], vhdfile, user, project)
+ images.fetch(instance['image_ref'], vhdfile,
+ instance['user_id'], instance['project_id'])
try:
self._create_vm(instance)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 40bf6107c..54c691a40 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -21,7 +21,6 @@
Handling of VM disk images.
"""
-from nova import context
from nova import flags
from nova.image import glance as glance_image_service
import nova.image
@@ -33,13 +32,12 @@ FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.virt.images')
-def fetch(image_href, path, _user, _project):
+def fetch(context, image_href, path, _user_id, _project_id):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
(image_service, image_id) = nova.image.get_image_service(image_href)
with open(path, "wb") as image_file:
- elevated = context.get_admin_context()
- metadata = image_service.get(elevated, image_id, image_file)
+ metadata = image_service.get(context, image_id, image_file)
return metadata
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index c27e92feb..0acf25d28 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -54,7 +54,7 @@ from xml.etree import ElementTree
from eventlet import greenthread
from eventlet import tpool
-from nova import context
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
@@ -174,7 +174,7 @@ class LibvirtConnection(driver.ComputeDriver):
def init_host(self, host):
# Adopt existing VM's running here
- ctxt = context.get_admin_context()
+ ctxt = nova_context.get_admin_context()
for instance in db.instance_get_all_by_host(ctxt, host):
try:
LOG.debug(_('Checking state of %s'), instance['name'])
@@ -396,7 +396,7 @@ class LibvirtConnection(driver.ComputeDriver):
virt_dom.detachDevice(xml)
@exception.wrap_exception()
- def snapshot(self, instance, image_href):
+ def snapshot(self, context, instance, image_href):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+, the qemu_img flag is
@@ -405,14 +405,13 @@ class LibvirtConnection(driver.ComputeDriver):
"""
virt_dom = self._lookup_by_name(instance['name'])
- elevated = context.get_admin_context()
(image_service, image_id) = nova.image.get_image_service(
instance['image_ref'])
- base = image_service.show(elevated, image_id)
+ base = image_service.show(context, image_id)
(snapshot_image_service, snapshot_image_id) = \
nova.image.get_image_service(image_href)
- snapshot = snapshot_image_service.show(elevated, snapshot_image_id)
+ snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'disk_format': base['disk_format'],
'container_format': base['container_format'],
@@ -463,7 +462,7 @@ class LibvirtConnection(driver.ComputeDriver):
# Upload that image to the image service
with open(out_path) as image_file:
- image_service.update(elevated,
+ image_service.update(context,
image_href,
metadata,
image_file)
@@ -538,7 +537,7 @@ class LibvirtConnection(driver.ComputeDriver):
dom.create()
@exception.wrap_exception()
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, callback, network_info):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
@@ -553,7 +552,7 @@ class LibvirtConnection(driver.ComputeDriver):
rescue_images = {'image_id': FLAGS.rescue_image_id,
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
- self._create_image(instance, xml, '.rescue', rescue_images)
+ self._create_image(context, instance, xml, '.rescue', rescue_images)
self._create_new_domain(xml)
def _wait_for_rescue():
@@ -592,13 +591,14 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
xml = self.to_xml(instance, False, network_info=network_info,
block_device_mapping=block_device_mapping)
block_device_mapping = block_device_mapping or []
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info=network_info,
+ self._create_image(context, instance, xml, network_info=network_info,
block_device_mapping=block_device_mapping)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
@@ -769,9 +769,10 @@ class LibvirtConnection(driver.ComputeDriver):
else:
utils.execute('cp', base, target)
- def _fetch_image(self, target, image_id, user, project, size=None):
+ def _fetch_image(self, context, target, image_id, user_id, project_id,
+ size=None):
"""Grab image and optionally attempt to resize it"""
- images.fetch(image_id, target, user, project)
+ images.fetch(context, image_id, target, user_id, project_id)
if size:
disk.extend(target, size)
@@ -780,8 +781,9 @@ class LibvirtConnection(driver.ComputeDriver):
utils.execute('truncate', target, '-s', "%dG" % local_gb)
# TODO(vish): should we format disk by default?
- def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
- network_info=None, block_device_mapping=None):
+ def _create_image(self, context, inst, libvirt_xml, suffix='',
+ disk_images=None, network_info=None,
+ block_device_mapping=None):
block_device_mapping = block_device_mapping or []
if not suffix:
@@ -809,9 +811,6 @@ class LibvirtConnection(driver.ComputeDriver):
os.close(os.open(basepath('console.log', ''),
os.O_CREAT | os.O_WRONLY, 0660))
- user = manager.AuthManager().get_user(inst['user_id'])
- project = manager.AuthManager().get_project(inst['project_id'])
-
if not disk_images:
disk_images = {'image_id': inst['image_ref'],
'kernel_id': inst['kernel_id'],
@@ -820,19 +819,21 @@ class LibvirtConnection(driver.ComputeDriver):
if disk_images['kernel_id']:
fname = '%08x' % int(disk_images['kernel_id'])
self._cache_image(fn=self._fetch_image,
+ context=context,
target=basepath('kernel'),
fname=fname,
image_id=disk_images['kernel_id'],
- user=user,
- project=project)
+ user_id=inst['user_id'],
+ project_id=inst['project_id'])
if disk_images['ramdisk_id']:
fname = '%08x' % int(disk_images['ramdisk_id'])
self._cache_image(fn=self._fetch_image,
+ context=context,
target=basepath('ramdisk'),
fname=fname,
image_id=disk_images['ramdisk_id'],
- user=user,
- project=project)
+ user_id=inst['user_id'],
+ project_id=inst['project_id'])
root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
size = FLAGS.minimum_root_size
@@ -846,12 +847,13 @@ class LibvirtConnection(driver.ComputeDriver):
if not self._volume_in_mapping(self.root_mount_device,
block_device_mapping):
self._cache_image(fn=self._fetch_image,
+ context=context,
target=basepath('disk'),
fname=root_fname,
cow=FLAGS.use_cow_images,
image_id=disk_images['image_id'],
- user=user,
- project=project,
+ user_id=inst['user_id'],
+ project_id=inst['project_id'],
size=size)
if inst_type['local_gb'] and not self._volume_in_mapping(
@@ -882,7 +884,7 @@ class LibvirtConnection(driver.ComputeDriver):
ifc_template = open(FLAGS.injected_network_template).read()
ifc_num = -1
have_injected_networks = False
- admin_context = context.get_admin_context()
+ admin_context = nova_context.get_admin_context()
for (network_ref, mapping) in network_info:
ifc_num += 1
@@ -1090,8 +1092,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_disks(self, instance_name):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
@@ -1132,8 +1133,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_interfaces(self, instance_name):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
Returns a list of all network interfaces for this instance.
"""
@@ -1348,16 +1348,14 @@ class LibvirtConnection(driver.ComputeDriver):
def block_stats(self, instance_name, disk):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
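
_cache_image itself is outside this hunk, so the following is only an assumption about its behaviour: the call sites above suggest it forwards its extra keyword arguments (context, image_id, user_id, project_id, size, ...) straight to fn, which is how the request context reaches _fetch_image. A hypothetical sketch of that forwarding pattern:

import os
import shutil


def cache_image(fn, target, fname, cow=False, **kwargs):
    # Hypothetical mirror of the forwarding: fetch into a shared cache
    # entry once, then copy it to the per-instance target.  Everything
    # in kwargs is passed through untouched to fn (e.g. _fetch_image).
    base_dir = os.path.join(os.path.dirname(target), '_base')
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    base = os.path.join(base_dir, fname)
    if not os.path.exists(base):
        fn(target=base, **kwargs)
    if not os.path.exists(target):
        # the real driver can build a COW layer here; a plain copy
        # keeps the sketch simple
        shutil.copyfile(base, target)
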
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 1ee8fa1c0..07a6ba6ab 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -26,7 +26,7 @@ import urllib
import urllib2
import uuid
-from nova import context
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
@@ -89,7 +89,7 @@ class VMWareVMOps(object):
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, instance, network_info):
+ def spawn(self, context, instance, network_info):
"""
Creates a VM instance.
@@ -111,7 +111,7 @@ class VMWareVMOps(object):
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
net_name = network['bridge']
@@ -329,7 +329,7 @@ class VMWareVMOps(object):
LOG.debug(_("Powered on the VM instance %s") % instance.name)
_power_on_vm()
- def snapshot(self, instance, snapshot_name):
+ def snapshot(self, context, instance, snapshot_name):
"""
Create snapshot from a running VM instance.
Steps followed are:
@@ -721,11 +721,11 @@ class VMWareVMOps(object):
Set the machine id of the VM for guest tools to pick up and change
the IP.
"""
- admin_context = context.get_admin_context()
+ admin_context = nova_context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
mac_address = None
if instance['mac_addresses']:
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index ce57847b2..3d209fa99 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,13 +124,14 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
"""Create VM instance."""
- self._vmops.spawn(instance, network_info)
+ self._vmops.spawn(context, instance, network_info)
- def snapshot(self, instance, name):
+ def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
- self._vmops.snapshot(instance, name)
+ self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info):
"""Reboot VM instance."""
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 62863c6d8..63bc191cf 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -37,7 +37,6 @@ import nova.image
from nova.image import glance as glance_image_service
from nova import log as logging
from nova import utils
-from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
@@ -85,38 +84,22 @@ class ImageType:
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
+ _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "os"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
+ _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR)
@classmethod
def to_string(cls, image_type):
- if image_type == ImageType.KERNEL:
- return ImageType.KERNEL_STR
- elif image_type == ImageType.RAMDISK:
- return ImageType.RAMDISK_STR
- elif image_type == ImageType.DISK:
- return ImageType.DISK_STR
- elif image_type == ImageType.DISK_RAW:
- return ImageType.DISK_RAW_STR
- elif image_type == ImageType.DISK_VHD:
- return ImageType.VHD_STR
+ return dict(zip(ImageType._ids, ImageType._strs)).get(image_type)
@classmethod
def from_string(cls, image_type_str):
- if image_type_str == ImageType.KERNEL_STR:
- return ImageType.KERNEL
- elif image_type == ImageType.RAMDISK_STR:
- return ImageType.RAMDISK
- elif image_type == ImageType.DISK_STR:
- return ImageType.DISK
- elif image_type == ImageType.DISK_RAW_STR:
- return ImageType.DISK_RAW
- elif image_type == ImageType.DISK_VHD_STR:
- return ImageType.VHD
+ return dict(zip(ImageType._strs, ImageType._ids)).get(image_type_str)
class VMHelper(HelperBase):
@@ -359,7 +342,7 @@ class VMHelper(HelperBase):
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
@classmethod
- def upload_image(cls, session, instance, vdi_uuids, image_id):
+ def upload_image(cls, context, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
@@ -377,37 +360,30 @@ class VMHelper(HelperBase):
'glance_host': glance_host,
'glance_port': glance_port,
'sr_path': cls.get_sr_path(session),
- 'os_type': os_type}
+ 'os_type': os_type,
+ 'auth_token': getattr(context, 'auth_token', None)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(task, instance.id)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project,
- image_type):
- """
- image_type is interpreted as an ImageType instance
- Related flags:
- xenapi_image_service = ['glance', 'objectstore']
- glance_address = 'address for glance services'
- glance_port = 'port for glance services'
+ def fetch_image(cls, context, session, instance_id, image, user_id,
+ project_id, image_type):
+ """Fetch image from glance based on image type.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
- access = AuthManager().get_access_key(user, project)
-
- if FLAGS.xenapi_image_service == 'glance':
- return cls._fetch_image_glance(session, instance_id, image,
- access, image_type)
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(context,
+ session, instance_id, image, image_type)
else:
- return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret,
- image_type)
+ return cls._fetch_image_glance_disk(context,
+ session, instance_id, image, image_type)
@classmethod
- def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ def _fetch_image_glance_vhd(cls, context, session, instance_id, image,
image_type):
"""Tell glance to download an image and put the VHDs into the SR
@@ -429,7 +405,8 @@ class VMHelper(HelperBase):
'glance_host': glance_host,
'glance_port': glance_port,
'uuid_stack': uuid_stack,
- 'sr_path': cls.get_sr_path(session)}
+ 'sr_path': cls.get_sr_path(session),
+ 'auth_token': getattr(context, 'auth_token', None)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
@@ -455,7 +432,7 @@ class VMHelper(HelperBase):
return vdis
@classmethod
- def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ def _fetch_image_glance_disk(cls, context, session, instance_id, image,
image_type):
"""Fetch the image from Glance
@@ -475,6 +452,7 @@ class VMHelper(HelperBase):
sr_ref = safe_find_sr(session)
glance_client, image_id = nova.image.get_glance_client(image)
+ glance_client.set_auth_token(getattr(context, 'auth_token', None))
meta, image_file = glance_client.get_image(image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
@@ -578,136 +556,38 @@ class VMHelper(HelperBase):
else:
return ImageType.DISK_RAW
- # FIXME(sirp): can we unify the ImageService and xenapi_image_service
- # abstractions?
- if FLAGS.xenapi_image_service == 'glance':
- image_type = determine_from_glance()
- else:
- image_type = determine_from_instance()
+ image_type = determine_from_glance()
log_disk_format(image_type)
return image_type
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access,
- image_type):
- """Fetch image from glance based on image type.
-
- Returns: A single filename if image_type is KERNEL or RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- if image_type == ImageType.DISK_VHD:
- return cls._fetch_image_glance_vhd(
- session, instance_id, image, access, image_type)
- else:
- return cls._fetch_image_glance_disk(
- session, instance_id, image, access, image_type)
-
- @classmethod
- def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, image_type):
- """Fetch an image from objectstore.
-
- Returns: A single filename if image_type is KERNEL or RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
- image)
- LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- fn = 'get_kernel'
- else:
- fn = 'get_vdi'
- args = {}
- args['src_url'] = url
- args['username'] = access
- args['password'] = secret
- args['add_partition'] = 'false'
- args['raw'] = 'false'
- if not image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- args['add_partition'] = 'true'
- if image_type == ImageType.DISK_RAW:
- args['raw'] = 'true'
- task = session.async_call_plugin('objectstore', fn, args)
- vdi_uuid = None
- filename = None
- if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- filename = session.wait_for_task(task, instance_id)
- else:
- vdi_uuid = session.wait_for_task(task, instance_id)
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=vdi_uuid,
- file=filename)]
-
- @classmethod
def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
os_type):
"""
Determine whether the VM will use a paravirtualized kernel or if it
will use hardware virtualization.
- 1. Objectstore (any image type):
- We use plugin to figure out whether the VDI uses PV
-
- 2. Glance (VHD): then we use `os_type`, raise if not set
-
- 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
- available
-
- 4. Glance (DISK): pv is assumed
- """
- if FLAGS.xenapi_image_service == 'glance':
- # 2, 3, 4: Glance
- return cls._determine_is_pv_glance(
- session, vdi_ref, disk_image_type, os_type)
- else:
- # 1. Objecstore
- return cls._determine_is_pv_objectstore(session, instance_id,
- vdi_ref)
-
- @classmethod
- def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
- LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
- fn = "is_vdi_pv"
- args = {}
- args['vdi-ref'] = vdi_ref
- task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(task, instance_id)
- pv = None
- if pv_str.lower() == 'true':
- pv = True
- elif pv_str.lower() == 'false':
- pv = False
- LOG.debug(_("PV Kernel in VDI:%s"), pv)
- return pv
-
- @classmethod
- def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
- os_type):
- """
- For a Glance image, determine if we need paravirtualization.
-
- The relevant scenarios are:
- 2. Glance (VHD): then we use `os_type`, raise if not set
+ 1. Glance (VHD): then we use `os_type`, raise if not set
- 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ 2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
available
- 4. Glance (DISK): pv is assumed
+ 3. Glance (DISK): pv is assumed
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
if disk_image_type == ImageType.DISK_VHD:
- # 2. VHD
+ # 1. VHD
if os_type == 'windows':
is_pv = False
else:
is_pv = True
elif disk_image_type == ImageType.DISK_RAW:
- # 3. RAW
+ # 2. RAW
is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
elif disk_image_type == ImageType.DISK:
- # 4. Disk
+ # 3. Disk
is_pv = True
else:
raise exception.Error(_("Unknown image format %(disk_image_type)s")
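
The to_string/from_string ladders collapse into two dict(zip(...)) lookups, which also avoids the stale ImageType.VHD_STR / ImageType.VHD names in the removed branches. A standalone sketch of the same mapping; the numeric values for KERNEL and RAMDISK are assumed, since only DISK, DISK_RAW and DISK_VHD are visible in this hunk:

class ImageType(object):
    KERNEL = 0        # assumed value, not shown in this hunk
    RAMDISK = 1       # assumed value, not shown in this hunk
    DISK = 2
    DISK_RAW = 3
    DISK_VHD = 4
    _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD)

    KERNEL_STR = "kernel"
    RAMDISK_STR = "ramdisk"
    DISK_STR = "os"
    DISK_RAW_STR = "os_raw"
    DISK_VHD_STR = "vhd"
    _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR)

    @classmethod
    def to_string(cls, image_type):
        # unknown ids map to None instead of raising
        return dict(zip(cls._ids, cls._strs)).get(image_type)

    @classmethod
    def from_string(cls, image_type_str):
        return dict(zip(cls._strs, cls._ids)).get(image_type_str)


assert ImageType.to_string(ImageType.DISK_VHD) == "vhd"
assert ImageType.from_string("os_raw") == ImageType.DISK_RAW
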
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2640e1ba2..b3b812a48 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -30,7 +30,7 @@ import sys
import time
import uuid
-from nova import context
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
@@ -38,7 +38,6 @@ from nova import ipv6
from nova import log as logging
from nova import utils
-from nova.auth.manager import AuthManager
from nova.compute import power_state
from nova.virt import driver
from nova.virt.xenapi.network_utils import NetworkHelper
@@ -110,17 +109,19 @@ class VMOps(object):
instance_infos.append(instance_info)
return instance_infos
- def revert_resize(self, instance):
+ def revert_migration(self, instance):
vm_ref = VMHelper.lookup(self._session, instance.name)
self._start(instance, vm_ref)
- def finish_resize(self, instance, disk_info, network_info):
+ def finish_migration(self, context, instance, disk_info, network_info,
+ resize_instance):
vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
disk_info['cow'])
- vm_ref = self._create_vm(instance,
+ vm_ref = self._create_vm(context, instance,
[dict(vdi_type='os', vdi_uuid=vdi_uuid)],
network_info)
- self.resize_instance(instance, vdi_uuid)
+ if resize_instance:
+ self.resize_instance(instance, vdi_uuid)
self._spawn(instance, vm_ref)
def _start(self, instance, vm_ref=None):
@@ -133,20 +134,19 @@ class VMOps(object):
LOG.debug(_("Starting instance %s"), instance.name)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- def _create_disks(self, instance):
- user = AuthManager().get_user(instance.user_id)
- project = AuthManager().get_project(instance.project_id)
+ def _create_disks(self, context, instance):
disk_image_type = VMHelper.determine_disk_image_type(instance)
- vdis = VMHelper.fetch_image(self._session,
- instance.id, instance.image_ref, user, project,
+ vdis = VMHelper.fetch_image(context, self._session,
+ instance.id, instance.image_ref,
+ instance.user_id, instance.project_id,
disk_image_type)
return vdis
- def spawn(self, instance, network_info):
+ def spawn(self, context, instance, network_info):
vdis = None
try:
- vdis = self._create_disks(instance)
- vm_ref = self._create_vm(instance, vdis, network_info)
+ vdis = self._create_disks(context, instance)
+ vm_ref = self._create_vm(context, instance, vdis, network_info)
self._spawn(instance, vm_ref)
except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
LOG.exception(_("instance %s: Failed to spawn"),
@@ -156,11 +156,11 @@ class VMOps(object):
self._handle_spawn_error(vdis, spawn_error)
raise spawn_error
- def spawn_rescue(self, instance):
+ def spawn_rescue(self, context, instance, network_info):
"""Spawn a rescue instance."""
- self.spawn(instance)
+ self.spawn(context, instance, network_info)
- def _create_vm(self, instance, vdis, network_info):
+ def _create_vm(self, context, instance, vdis, network_info):
"""Create VM instance."""
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
@@ -171,26 +171,23 @@ class VMOps(object):
if not VMHelper.ensure_free_mem(self._session, instance):
LOG.exception(_('instance %(instance_name)s: not enough free '
'memory') % locals())
- db.instance_set_state(context.get_admin_context(),
+ db.instance_set_state(nova_context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
return
- user = AuthManager().get_user(instance.user_id)
- project = AuthManager().get_project(instance.project_id)
-
disk_image_type = VMHelper.determine_disk_image_type(instance)
kernel = None
ramdisk = None
try:
if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session, instance.id,
- instance.kernel_id, user, project,
- ImageType.KERNEL)[0]
+ kernel = VMHelper.fetch_image(context, self._session,
+ instance.id, instance.kernel_id, instance.user_id,
+ instance.project_id, ImageType.KERNEL)[0]
if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session, instance.id,
- instance.ramdisk_id, user, project,
- ImageType.RAMDISK)[0]
+ ramdisk = VMHelper.fetch_image(context, self._session,
+ instance.id, instance.ramdisk_id, instance.user_id,
+ instance.project_id, ImageType.RAMDISK)[0]
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdis[0]['vdi_uuid'])
@@ -209,7 +206,7 @@ class VMOps(object):
if instance.vm_mode != vm_mode:
# Update database with normalized (or determined) value
- db.instance_update(context.get_admin_context(),
+ db.instance_update(nova_context.get_admin_context(),
instance['id'], {'vm_mode': vm_mode})
vm_ref = VMHelper.create_vm(self._session, instance,
kernel and kernel.get('file', None) or None,
@@ -271,7 +268,7 @@ class VMOps(object):
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
- ctx = context.get_admin_context()
+ ctx = nova_context.get_admin_context()
agent_build = db.agent_build_get_by_triple(ctx, 'xen',
instance.os_type, instance.architecture)
if agent_build:
@@ -415,7 +412,7 @@ class VMOps(object):
# if instance_or_vm is an int/long it must be instance id
elif isinstance(instance_or_vm, (int, long)):
- ctx = context.get_admin_context()
+ ctx = nova_context.get_admin_context()
instance_obj = db.instance_get(ctx, instance_or_vm)
instance_name = instance_obj.name
else:
@@ -440,9 +437,10 @@ class VMOps(object):
vm,
"start")
- def snapshot(self, instance, image_id):
+ def snapshot(self, context, instance, image_id):
"""Create snapshot from a running VM instance.
+ :param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
@@ -467,11 +465,11 @@ class VMOps(object):
try:
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
# call plugin to ship snapshot off to glance
- VMHelper.upload_image(
+ VMHelper.upload_image(context,
self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
- self._destroy(instance, template_vm_ref, None,
+ self._destroy(instance, template_vm_ref,
shutdown=False, destroy_kernel_ramdisk=False)
logging.debug(_("Finished snapshot and upload for VM %s"), instance)
@@ -568,18 +566,22 @@ class VMOps(object):
return new_cow_uuid
def resize_instance(self, instance, vdi_uuid):
- """Resize a running instance by changing it's RAM and disk size."""
+ """Resize a running instance by changing its RAM and disk size."""
#TODO(mdietz): this will need to be adjusted for swap later
#The new disk size must be in bytes
- new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024)
- instance_name = instance.name
- instance_local_gb = instance.local_gb
- LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s."
- " Expanding to %(instance_local_gb)d GB") % locals())
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size)
- LOG.debug(_("Resize instance %s complete") % (instance.name))
+ new_disk_size = instance.local_gb * 1024 * 1024 * 1024
+ if new_disk_size > 0:
+ instance_name = instance.name
+ instance_local_gb = instance.local_gb
+ LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance"
+ "%(instance_name)s. Expanding to %(instance_local_gb)d"
+ " GB") % locals())
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ # instances with no local storage are skipped by the check above
+ self._session.call_xenapi('VDI.resize_online', vdi_ref,
+ str(new_disk_size))
+ LOG.debug(_("Resize instance %s complete") % (instance.name))
def reboot(self, instance):
"""Reboot VM instance."""
@@ -684,7 +686,7 @@ class VMOps(object):
# Successful return code from password is '0'
if resp_dict['returncode'] != '0':
raise RuntimeError(resp_dict['message'])
- db.instance_update(context.get_admin_context(),
+ db.instance_update(nova_context.get_admin_context(),
instance['id'],
dict(admin_pass=new_pass))
return resp_dict['message']
@@ -853,7 +855,7 @@ class VMOps(object):
vm_ref = VMHelper.lookup(self._session, instance.name)
return self._destroy(instance, vm_ref, network_info, shutdown=True)
- def _destroy(self, instance, vm_ref, network_info, shutdown=True,
+ def _destroy(self, instance, vm_ref, network_info=None, shutdown=True,
destroy_kernel_ramdisk=True):
"""Destroys VM instance by performing:
@@ -912,7 +914,7 @@ class VMOps(object):
True)
self._wait_with_callback(instance.id, task, callback)
- def rescue(self, instance, callback):
+ def rescue(self, context, instance, callback, network_info):
"""Rescue the specified instance.
- shutdown the instance VM.
@@ -930,7 +932,7 @@ class VMOps(object):
self._shutdown(instance, vm_ref)
self._acquire_bootlock(vm_ref)
instance._rescue = True
- self.spawn_rescue(instance)
+ self.spawn_rescue(context, instance, network_info)
rescue_vm_ref = VMHelper.lookup(self._session, instance.name)
vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
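
finish_migration only resizes when its resize_instance flag is set, and resize_instance itself now skips VDI.resize_online for flavors with no local storage. A tiny standalone check of that guard; new_disk_size_bytes is an illustrative helper, not part of the patch:

def new_disk_size_bytes(local_gb):
    # mirrors the guard above: local_gb == 0 yields 0, and the
    # VDI.resize_online call is skipped entirely
    return local_gb * 1024 * 1024 * 1024


assert new_disk_size_bytes(0) == 0            # no resize issued
assert new_disk_size_bytes(20) == 20 * 1024 ** 3
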
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index cddb8203b..39afbd650 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -101,9 +101,6 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
-flags.DEFINE_string('xenapi_image_service',
- 'glance',
- 'Where to get VM images: glance or objectstore.')
flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
5.0,
'The interval used for polling of coalescing vhds.'
@@ -187,21 +184,24 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
"""Create VM instance"""
- self._vmops.spawn(instance, network_info)
+ self._vmops.spawn(context, instance, network_info)
- def revert_resize(self, instance):
+ def revert_migration(self, instance):
"""Reverts a resize, powering back on the instance"""
- self._vmops.revert_resize(instance)
+ self._vmops.revert_migration(instance)
- def finish_resize(self, instance, disk_info, network_info):
+ def finish_migration(self, context, instance, disk_info, network_info,
+ resize_instance=False):
"""Completes a resize, turning on the migrated instance"""
- self._vmops.finish_resize(instance, disk_info, network_info)
+ self._vmops.finish_migration(context, instance, disk_info,
+ network_info, resize_instance)
- def snapshot(self, instance, image_id):
+ def snapshot(self, context, instance, image_id):
""" Create snapshot from a running VM instance """
- self._vmops.snapshot(instance, image_id)
+ self._vmops.snapshot(context, instance, image_id)
def reboot(self, instance, network_info):
"""Reboot VM instance"""
@@ -242,9 +242,9 @@ class XenAPIConnection(driver.ComputeDriver):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, callback, network_info):
"""Rescue the specified instance"""
- self._vmops.rescue(instance, callback)
+ self._vmops.rescue(context, instance, callback, network_info)
def unrescue(self, instance, callback, network_info):
"""Unrescue the specified instance"""
diff --git a/nova/wsgi.py b/nova/wsgi.py
index eae3afcb4..c8ddb97d7 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -274,6 +274,18 @@ class Middleware(Application):
return self.process_response(response)
+class InjectContext(Middleware):
+ """Add a 'nova.context' to WSGI environ."""
+ def __init__(self, context, *args, **kwargs):
+ self.context = context
+ super(InjectContext, self).__init__(*args, **kwargs)
+
+ @webob.dec.wsgify(RequestClass=Request)
+ def __call__(self, req):
+ req.environ['nova.context'] = self.context
+ return self.application
+
+
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index fbe080b22..86e837849 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -67,12 +67,17 @@ def _copy_kernel_vdi(dest, copy_args):
def _download_tarball(sr_path, staging_path, image_id, glance_host,
- glance_port):
+ glance_port, auth_token):
"""Download the tarball image from Glance and extract it into the staging
area.
"""
+ # Build request headers
+ headers = {}
+ if auth_token:
+ headers['x-auth-token'] = auth_token
+
conn = httplib.HTTPConnection(glance_host, glance_port)
- conn.request('GET', '/v1/images/%s' % image_id)
+ conn.request('GET', '/v1/images/%s' % image_id, headers=headers)
resp = conn.getresponse()
if resp.status == httplib.NOT_FOUND:
raise Exception("Image '%s' not found in Glance" % image_id)
@@ -236,7 +241,8 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
os.link(source, link_name)
-def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type):
+def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type,
+ auth_token):
"""
Create a tarball of the image and then stream that into Glance
using chunked-transfer-encoded HTTP.
@@ -263,6 +269,10 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type):
'x-image-meta-container-format': 'ovf',
'x-image-meta-property-os-type': os_type}
+ # If we have an auth_token, set an x-auth-token header
+ if auth_token:
+ headers['x-auth-token'] = auth_token
+
for header, value in headers.iteritems():
conn.putheader(header, value)
conn.endheaders()
@@ -364,11 +374,12 @@ def download_vhd(session, args):
glance_port = params["glance_port"]
uuid_stack = params["uuid_stack"]
sr_path = params["sr_path"]
+ auth_token = params["auth_token"]
staging_path = _make_staging_area(sr_path)
try:
_download_tarball(sr_path, staging_path, image_id, glance_host,
- glance_port)
+ glance_port, auth_token)
# Right now, it's easier to return a single string via XenAPI,
# so we'll json encode the list of VHDs.
return json.dumps(_import_vhds(sr_path, staging_path, uuid_stack))
@@ -386,12 +397,13 @@ def upload_vhd(session, args):
glance_port = params["glance_port"]
sr_path = params["sr_path"]
os_type = params["os_type"]
+ auth_token = params["auth_token"]
staging_path = _make_staging_area(sr_path)
try:
_prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
_upload_tarball(staging_path, image_id, glance_host, glance_port,
- os_type)
+ os_type, auth_token)
finally:
_cleanup_staging_area(staging_path)
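
Both transfer paths now forward the request's auth token as an x-auth-token header, and only when one was handed down. A small sketch of that header handling in isolation, using the same Python 2 httplib calls as the plugin; glance_host, glance_port and image_id are placeholders:

import httplib


def get_image(glance_host, glance_port, image_id, auth_token=None):
    # Same convention as the plugin: send x-auth-token only when a
    # token arrived with the compute request context.
    headers = {}
    if auth_token:
        headers['x-auth-token'] = auth_token

    conn = httplib.HTTPConnection(glance_host, glance_port)
    try:
        conn.request('GET', '/v1/images/%s' % image_id, headers=headers)
        resp = conn.getresponse()
        if resp.status == httplib.NOT_FOUND:
            raise Exception("Image '%s' not found in Glance" % image_id)
        return resp.read()
    finally:
        conn.close()
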
diff --git a/po/ast.po b/po/ast.po
index ac6190424..449cddb07 100644
--- a/po/ast.po
+++ b/po/ast.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr ""
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/cs.po b/po/cs.po
index 33f7808a5..2dc763838 100644
--- a/po/cs.po
+++ b/po/cs.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr ""
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2265,10 +2210,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/da.po b/po/da.po
index 5aead53c3..570629119 100644
--- a/po/da.po
+++ b/po/da.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr ""
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/de.po b/po/de.po
index cf40a0f5e..772ae236c 100644
--- a/po/de.po
+++ b/po/de.po
@@ -131,33 +131,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "Kein passender Prozess gefunden"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "Bedient %s"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "Alle vorhandenen FLAGS:"
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "%s wird gestartet"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1785,34 +1758,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2270,10 +2215,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/en_AU.po b/po/en_AU.po
index e53f9fc07..3fa62c006 100644
--- a/po/en_AU.po
+++ b/po/en_AU.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr ""
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/en_GB.po b/po/en_GB.po
index 601f6170b..b204c93a1 100644
--- a/po/en_GB.po
+++ b/po/en_GB.po
@@ -130,33 +130,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "Wrong number of arguments."
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr "pidfile %s does not exist. Daemon not running?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "No such process"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "Serving %s"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "Full set of FLAGS:"
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "Starting %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1803,34 +1776,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2288,10 +2233,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/es.po b/po/es.po
index eeaf209a9..f97434041 100644
--- a/po/es.po
+++ b/po/es.po
@@ -130,33 +130,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "Cantidad de argumentos incorrecta"
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr "El \"pidfile\" %s no existe. Quizás el servicio no este corriendo.\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "No existe el proceso"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "Sirviendo %s"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "Conjunto completo de opciones (FLAGS):"
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "Iniciando %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1819,34 +1792,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr "Obtenida excepción %s"
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr "actualizando %s..."
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr "error inesperado durante la actualización"
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr "excepción inexperada al obtener la conexión"
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr "Encontrada interfaz: %s"
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2309,10 +2254,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/fr.po b/po/fr.po
index 9dd789b3c..83e4e7af0 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -133,35 +133,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "Nombre d'arguments incorrect."
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-"Le fichier pid %s n'existe pas. Est-ce que le processus est en cours "
-"d'exécution ?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "Aucun processus de ce type"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "En train de servir %s"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "Ensemble de propriétés complet :"
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "Démarrage de %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1865,34 +1836,6 @@ msgstr "Tâche [%(name)s] %(task)s état : %(status)s %(error_info)s"
msgid "Got exception: %s"
msgstr "Reçu exception : %s"
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr "mise à jour %s..."
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr "erreur inopinée pendant la ise à jour"
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr "Ne peut pas récupérer blockstats pour \"%(disk)s\" sur \"%(iid)s\""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr "Ne peut pas récupérer ifstats pour \"%(interface)s\" sur \"%(iid)s\""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr "erreur inopinée pendant la connexion"
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr "Instance trouvée : %s"
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2373,10 +2316,6 @@ msgstr "Démarrage %(arg0)s sur %(host)s:%(port)s"
msgid "You must implement __call__"
msgstr "Vous devez implémenter __call__"
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr "Démarrage du superviseur d'instance"
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr "Allocation IP"
diff --git a/po/it.po b/po/it.po
index 60c3a388a..6bfcf1274 100644
--- a/po/it.po
+++ b/po/it.po
@@ -134,34 +134,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "Numero errato di argomenti"
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-"Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "Nessun processo trovato"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "Servire %s"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "Insieme di FLAGS:"
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "Avvio di %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1791,34 +1763,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2278,10 +2222,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/ja.po b/po/ja.po
index 09d17af3b..a7906ede8 100644
--- a/po/ja.po
+++ b/po/ja.po
@@ -130,33 +130,6 @@ msgstr "例外: compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "例外: compute.api::resume %s"
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "引数の数が異なります。"
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "そのようなプロセスはありません"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "%s サービスの開始"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "FLAGSの一覧:"
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "%s を起動中"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1808,34 +1781,6 @@ msgstr "タスク [%(name)s] %(task)s 状態: %(status)s %(error_info)s"
msgid "Got exception: %s"
msgstr "例外 %s が発生しました。"
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr "%s の情報の更新…"
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr "更新の最中に予期しないエラーが発生しました。"
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr "\"%(iid)s\" 上の \"%(disk)s\" 用のブロック統計(blockstats)が取得できません"
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr "\"%(iid)s\" 上の %(interface)s\" 用インターフェース統計(ifstats)が取得できません"
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr "接続に際し予期しないエラーが発生しました。"
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr "インスタンス %s が見つかりました。"
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2311,10 +2256,6 @@ msgstr "%(host)s:%(port)s 上で %(arg0)s を開始しています"
msgid "You must implement __call__"
msgstr "__call__ を実装しなければなりません"
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr "インスタンスモニタを開始しています"
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr "IP アドレスをリースしました"
diff --git a/po/nova.pot b/po/nova.pot
index 58140302d..e180ed750 100644
--- a/po/nova.pot
+++ b/po/nova.pot
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr ""
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/pt_BR.po b/po/pt_BR.po
index f067a69e0..b3aefce44 100644
--- a/po/pt_BR.po
+++ b/po/pt_BR.po
@@ -126,34 +126,6 @@ msgstr "compute.api::suspend %s"
msgid "compute.api::resume %s"
msgstr "compute.api::resume %s"
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "Número errado de argumentos."
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-"Arquivo do id do processo (pidfile) %s não existe. O Daemon está parado?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "Processo inexistente"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "Servindo %s"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "Conjunto completo de FLAGS:"
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "Iniciando %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1804,34 +1776,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2290,10 +2234,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/ru.po b/po/ru.po
index 5d8532f2e..1bf672fc3 100644
--- a/po/ru.po
+++ b/po/ru.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "Неверное число аргументов."
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr "pidfile %s не обнаружен. Демон не запущен?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "Запускается %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1779,34 +1752,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr "обновление %s..."
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr "неожиданная ошибка во время обновления"
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2264,10 +2209,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/tl.po b/po/tl.po
index 7f600e8f1..1ae59330b 100644
--- a/po/tl.po
+++ b/po/tl.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr ""
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2265,10 +2210,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/uk.po b/po/uk.po
index bbd831de9..481851e1c 100644
--- a/po/uk.po
+++ b/po/uk.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr ""
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr ""
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "Обслуговування %s"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "Запускається %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/zh_CN.po b/po/zh_CN.po
index c3d292a93..d0ddcd2f7 100644
--- a/po/zh_CN.po
+++ b/po/zh_CN.po
@@ -17,11 +17,6 @@ msgstr ""
"X-Launchpad-Export-Date: 2011-07-23 05:12+0000\n"
"X-Generator: Launchpad (build 13405)\n"
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "启动 %s 中"
-
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
#: ../nova/scheduler/simple.py:122
@@ -135,28 +130,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr "错误参数个数。"
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr "pidfile %s 不存在,守护进程是否运行?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "没有该进程"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr "正在为 %s 服务"
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr "FLAGS全集:"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1785,34 +1758,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2270,10 +2215,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/po/zh_TW.po b/po/zh_TW.po
index ad14c0e32..896e69618 100644
--- a/po/zh_TW.po
+++ b/po/zh_TW.po
@@ -125,33 +125,6 @@ msgstr ""
msgid "compute.api::resume %s"
msgstr ""
-#: ../nova/twistd.py:157
-msgid "Wrong number of arguments."
-msgstr ""
-
-#: ../nova/twistd.py:209
-#, python-format
-msgid "pidfile %s does not exist. Daemon not running?\n"
-msgstr "pidfile %s 不存在. Daemon未啟動?\n"
-
-#: ../nova/twistd.py:221
-msgid "No such process"
-msgstr "沒有此一程序"
-
-#: ../nova/twistd.py:230 ../nova/service.py:224
-#, python-format
-msgid "Serving %s"
-msgstr ""
-
-#: ../nova/twistd.py:262 ../nova/service.py:225
-msgid "Full set of FLAGS:"
-msgstr ""
-
-#: ../nova/twistd.py:266
-#, python-format
-msgid "Starting %s"
-msgstr "正在啟動 %s"
-
#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
#: ../nova/api/ec2/__init__.py:317
@@ -1778,34 +1751,6 @@ msgstr ""
msgid "Got exception: %s"
msgstr ""
-#: ../nova/compute/monitor.py:259
-#, python-format
-msgid "updating %s..."
-msgstr ""
-
-#: ../nova/compute/monitor.py:289
-msgid "unexpected error during update"
-msgstr ""
-
-#: ../nova/compute/monitor.py:356
-#, python-format
-msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:379
-#, python-format
-msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\""
-msgstr ""
-
-#: ../nova/compute/monitor.py:414
-msgid "unexpected exception getting connection"
-msgstr ""
-
-#: ../nova/compute/monitor.py:429
-#, python-format
-msgid "Found instance: %s"
-msgstr ""
-
#: ../nova/volume/san.py:67
#, python-format
msgid "Could not find iSCSI export for volume %s"
@@ -2263,10 +2208,6 @@ msgstr ""
msgid "You must implement __call__"
msgstr ""
-#: ../bin/nova-instancemonitor.py:55
-msgid "Starting instance monitor"
-msgstr ""
-
#: ../bin/nova-dhcpbridge.py:58
msgid "leasing ip"
msgstr ""
diff --git a/setup.py b/setup.py
index c165f40d7..8121e250b 100644
--- a/setup.py
+++ b/setup.py
@@ -124,7 +124,6 @@ setup(name='nova',
'bin/nova-dhcpbridge',
'bin/nova-direct-api',
'bin/nova-import-canonical-imagestore',
- 'bin/nova-instancemonitor',
'bin/nova-logspool',
'bin/nova-manage',
'bin/nova-network',
diff --git a/tools/eventlet-patch b/tools/eventlet-patch
deleted file mode 100644
index c87c5f279..000000000
--- a/tools/eventlet-patch
+++ /dev/null
@@ -1,24 +0,0 @@
-# HG changeset patch
-# User Soren Hansen <soren@linux2go.dk>
-# Date 1297678255 -3600
-# Node ID 4c846d555010bb5a91ab4da78dfe596451313742
-# Parent 5b7e9946c79f005c028eb63207cf5eb7bb21d1c3
-Don't attempt to wrap GreenPipes in GreenPipe
-
-If the os module is monkeypatched, Python's standard subprocess module
-will return greenio.GreenPipe instances for Popen objects' stdin, stdout,
-and stderr attributes. However, eventlet.green.subprocess tries to wrap
-these attributes in another greenio.GreenPipe, which GreenPipe refuses.
-
-diff -r 5b7e9946c79f -r 4c846d555010 eventlet/green/subprocess.py
---- a/eventlet/green/subprocess.py Sat Feb 05 13:05:05 2011 -0800
-+++ b/eventlet/green/subprocess.py Mon Feb 14 11:10:55 2011 +0100
-@@ -27,7 +27,7 @@
- # eventlet.processes.Process.run() method.
- for attr in "stdin", "stdout", "stderr":
- pipe = getattr(self, attr)
-- if pipe is not None:
-+ if pipe is not None and not type(pipe) == greenio.GreenPipe:
- wrapped_pipe = greenio.GreenPipe(pipe, pipe.mode, bufsize)
- setattr(self, attr, wrapped_pipe)
- __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
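The deleted tools/eventlet-patch above explains the problem it worked around: with the os module monkeypatched, subprocess already hands back greenio.GreenPipe objects for stdin/stdout/stderr, and wrapping them a second time fails. Below is a minimal sketch of that guard, assuming eventlet's greenio.GreenPipe as used in the patch hunk itself; the helper name wrap_std_pipes is illustrative only and is not part of the shipped code.

    # Sketch of the double-wrap guard from the deleted tools/eventlet-patch.
    # GreenPipe(pipe, pipe.mode, bufsize) mirrors the usage in the hunk above.
    from eventlet import greenio


    def wrap_std_pipes(proc, bufsize=-1):
        """Wrap proc.stdin/stdout/stderr in greenio.GreenPipe, but skip any
        attribute that is already a GreenPipe (the case the patch fixed)."""
        for attr in ("stdin", "stdout", "stderr"):
            pipe = getattr(proc, attr)
            # isinstance() is the idiomatic form of the patch's type() comparison.
            if pipe is not None and not isinstance(pipe, greenio.GreenPipe):
                setattr(proc, attr, greenio.GreenPipe(pipe, pipe.mode, bufsize))

With newer eventlet releases the guard became unnecessary, which is why both the patch file and the step in install_venv.py that applied it are removed in this change.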
diff --git a/tools/install_venv.py b/tools/install_venv.py
index f4b6583ed..3c2f6979f 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -31,7 +31,6 @@ import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.nova-venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
-TWISTED_NOVA = 'http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz'
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
@@ -106,20 +105,12 @@ def install_dependencies(venv=VENV):
'greenlet'], redirect_output=False)
run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r',
PIP_REQUIRES], redirect_output=False)
- run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv,
- TWISTED_NOVA], redirect_output=False)
# Tell the virtual env how to "import nova"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"nova.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
- # Patch eventlet (see FAQ # 1485)
- patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch')
- patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
- "eventlet", "green", "subprocess.py")
- patch_cmd = "patch %s %s" % (patchfile, patchsrc)
- os.system(patch_cmd)
def print_help():
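For context on the nova.pth write that remains in install_venv.py above: Python's site module reads *.pth files in site-packages at interpreter startup and appends each plain path line to sys.path, which is how "import nova" resolves from the source checkout inside the venv. A small sketch of that mechanism follows; the file name and project path are illustrative only, not the exact values install_venv.py uses.

    # Sketch: how a one-line .pth file extends sys.path (the mechanism the
    # nova.pth write relies on). Names and paths here are hypothetical.
    import os
    import sysconfig

    site_packages = sysconfig.get_paths()["purelib"]        # .../site-packages
    pth_file = os.path.join(site_packages, "example.pth")   # hypothetical name
    project_root = "/path/to/source/checkout"               # hypothetical path

    # Any non-comment line in a .pth file that names a directory is added to
    # sys.path by the site module the next time the interpreter starts.
    with open(pth_file, "w") as f:
        f.write(project_root + "\n")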
diff --git a/tools/pip-requires b/tools/pip-requires
index dec93c351..23e707034 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -9,7 +9,7 @@ boto==1.9b
carrot==0.10.5
eventlet
lockfile==0.8
-python-novaclient==2.5.7
+python-novaclient==2.5.9
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
@@ -20,13 +20,13 @@ mox==0.5.3
greenlet==0.3.1
nose
bzr
-Twisted>=10.1.0
PasteDeploy
paste
sqlalchemy-migrate
netaddr
sphinx
glance
+xattr>=0.6.0
nova-adminclient
suds==0.4
coverage