author     Lorin Hochstein <lorin@isi.edu>    2011-06-16 16:48:20 -0400
committer  Lorin Hochstein <lorin@isi.edu>    2011-06-16 16:48:20 -0400
commit     a51b2ed4ed4840262bd07d5dfc11e33a32e1accd (patch)
tree       1364ed7aac86f805ddba903efa0b43afc6e61112 /nova
parent     d68f6de8d8275ec6dd9f231b9b52971f2ad15263 (diff)
parent     1763419a3f6b01bb0ef98c700f0f350e756b359c (diff)
Upstream merge
Diffstat (limited to 'nova')
-rw-r--r--  nova/api/direct.py | 14
-rw-r--r--  nova/api/ec2/__init__.py | 1
-rw-r--r--  nova/api/ec2/admin.py | 7
-rw-r--r--  nova/api/ec2/cloud.py | 55
-rw-r--r--  nova/api/openstack/__init__.py | 94
-rw-r--r--  nova/api/openstack/accounts.py | 33
-rw-r--r--  nova/api/openstack/auth.py | 22
-rw-r--r--  nova/api/openstack/backup_schedules.py | 27
-rw-r--r--  nova/api/openstack/common.py | 83
-rw-r--r--  nova/api/openstack/consoles.py | 29
-rw-r--r--  nova/api/openstack/contrib/__init__.py | 2
-rw-r--r--  nova/api/openstack/contrib/volumes.py | 23
-rw-r--r--  nova/api/openstack/create_instance_helper.py | 346
-rw-r--r--  nova/api/openstack/extensions.py | 95
-rw-r--r--  nova/api/openstack/faults.py | 39
-rw-r--r--  nova/api/openstack/flavors.py | 34
-rw-r--r--  nova/api/openstack/image_metadata.py | 23
-rw-r--r--  nova/api/openstack/images.py | 161
-rw-r--r--  nova/api/openstack/ips.py | 33
-rw-r--r--  nova/api/openstack/limits.py | 51
-rw-r--r--  nova/api/openstack/ratelimiting/__init__.py | 2
-rw-r--r--  nova/api/openstack/server_metadata.py | 32
-rw-r--r--  nova/api/openstack/servers.py | 451
-rw-r--r--  nova/api/openstack/shared_ip_groups.py | 28
-rw-r--r--  nova/api/openstack/users.py | 43
-rw-r--r--  nova/api/openstack/versions.py | 43
-rw-r--r--  nova/api/openstack/views/limits.py | 9
-rw-r--r--  nova/api/openstack/views/servers.py | 28
-rw-r--r--  nova/api/openstack/wsgi.py | 395
-rw-r--r--  nova/api/openstack/zones.py | 123
-rw-r--r--  nova/auth/ldapdriver.py | 92
-rw-r--r--  nova/auth/manager.py | 16
-rw-r--r--  nova/auth/novarc.template | 5
-rw-r--r--  nova/compute/api.py | 259
-rw-r--r--  nova/compute/instance_types.py | 2
-rw-r--r--  nova/compute/manager.py | 16
-rw-r--r--  nova/compute/monitor.py | 3
-rw-r--r--  nova/console/vmrc.py | 2
-rw-r--r--  nova/context.py | 1
-rw-r--r--  nova/crypto.py | 3
-rw-r--r--  nova/db/sqlalchemy/api.py | 87
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py | 12
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py | 29
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py | 40
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py | 65
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py | 45
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/021_add_instance_type_extra_specs.py) | 0
-rw-r--r--  nova/db/sqlalchemy/models.py | 15
-rw-r--r--  nova/exception.py | 30
-rw-r--r--  nova/flags.py | 9
-rw-r--r--  nova/image/__init__.py | 98
-rw-r--r--  nova/image/fake.py | 87
-rw-r--r--  nova/image/glance.py | 51
-rw-r--r--  nova/image/local.py | 6
-rw-r--r--  nova/image/s3.py | 5
-rw-r--r--  nova/log.py | 10
-rw-r--r--  nova/network/manager.py | 13
-rw-r--r--  nova/notifier/api.py | 7
-rw-r--r--  nova/objectstore/s3server.py | 2
-rw-r--r--  nova/rpc.py | 1
-rw-r--r--  nova/scheduler/api.py | 20
-rw-r--r--  nova/scheduler/driver.py | 3
-rw-r--r--  nova/scheduler/host_filter.py | 3
-rw-r--r--  nova/scheduler/least_cost.py | 156
-rw-r--r--  nova/scheduler/manager.py | 12
-rw-r--r--  nova/scheduler/simple.py | 11
-rw-r--r--  nova/scheduler/zone_aware_scheduler.py | 183
-rw-r--r--  nova/scheduler/zone_manager.py | 18
-rw-r--r--  nova/test.py | 22
-rw-r--r--  nova/tests/api/openstack/extensions/foxinsocks.py | 4
-rw-r--r--  nova/tests/api/openstack/fakes.py | 54
-rw-r--r--  nova/tests/api/openstack/test_api.py | 21
-rw-r--r--  nova/tests/api/openstack/test_auth.py | 65
-rw-r--r--  nova/tests/api/openstack/test_common.py | 181
-rw-r--r--  nova/tests/api/openstack/test_extensions.py | 4
-rw-r--r--  nova/tests/api/openstack/test_images.py | 228
-rw-r--r--  nova/tests/api/openstack/test_limits.py | 4
-rw-r--r--  nova/tests/api/openstack/test_server_metadata.py | 25
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 268
-rw-r--r--  nova/tests/api/openstack/test_wsgi.py | 305
-rw-r--r--  nova/tests/api/openstack/test_zones.py | 6
-rw-r--r--  nova/tests/api/test_wsgi.py | 189
-rw-r--r--  nova/tests/glance/stubs.py | 11
-rw-r--r--  nova/tests/image/test_glance.py | 6
-rw-r--r--  nova/tests/integrated/integrated_helpers.py | 20
-rw-r--r--  nova/tests/integrated/test_servers.py | 6
-rw-r--r--  nova/tests/integrated/test_xml.py | 4
-rw-r--r--  nova/tests/scheduler/__init__.py | 0
-rw-r--r--  nova/tests/scheduler/test_host_filter.py | 206
-rw-r--r--  nova/tests/scheduler/test_least_cost_scheduler.py | 144
-rw-r--r--  nova/tests/scheduler/test_scheduler.py (renamed from nova/tests/test_scheduler.py) | 27
-rw-r--r--  nova/tests/scheduler/test_zone_aware_scheduler.py | 296
-rw-r--r--  nova/tests/test_auth.py | 1
-rw-r--r--  nova/tests/test_cloud.py | 91
-rw-r--r--  nova/tests/test_compute.py | 13
-rw-r--r--  nova/tests/test_console.py | 2
-rw-r--r--  nova/tests/test_crypto.py | 83
-rw-r--r--  nova/tests/test_host_filter.py | 13
-rw-r--r--  nova/tests/test_libvirt.py | 208
-rw-r--r--  nova/tests/test_middleware.py | 1
-rw-r--r--  nova/tests/test_misc.py | 13
-rw-r--r--  nova/tests/test_notifier.py | 25
-rw-r--r--  nova/tests/test_quota.py | 12
-rw-r--r--  nova/tests/test_vmwareapi.py | 7
-rw-r--r--  nova/tests/test_xenapi.py | 31
-rw-r--r--  nova/tests/test_zone_aware_scheduler.py | 121
-rw-r--r--  nova/tests/vmwareapi/db_fakes.py | 4
-rw-r--r--  nova/tests/xenapi/stubs.py | 18
-rw-r--r--  nova/twistd.py | 6
-rw-r--r--  nova/utils.py | 20
-rw-r--r--  nova/virt/hyperv.py | 2
-rw-r--r--  nova/virt/images.py | 16
-rw-r--r--  nova/virt/libvirt/connection.py | 28
-rw-r--r--  nova/virt/libvirt/firewall.py | 30
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 12
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py | 16
-rw-r--r--  nova/virt/xenapi/fake.py | 10
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 33
-rw-r--r--  nova/virt/xenapi/vmops.py | 25
-rw-r--r--  nova/volume/api.py | 5
-rw-r--r--  nova/volume/manager.py | 4
-rw-r--r--  nova/wsgi.py | 252
122 files changed, 4939 insertions, 2002 deletions
diff --git a/nova/api/direct.py b/nova/api/direct.py
index 8ceae299c..ec79151b1 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -42,6 +42,7 @@ from nova import exception
from nova import flags
from nova import utils
from nova import wsgi
+import nova.api.openstack.wsgi
# Global storage for registering modules.
@@ -251,7 +252,7 @@ class Reflection(object):
return self._methods[method]
-class ServiceWrapper(wsgi.Controller):
+class ServiceWrapper(object):
"""Wrapper to dynamically povide a WSGI controller for arbitrary objects.
With lightweight introspection allows public methods on the object to
@@ -265,7 +266,7 @@ class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
- @webob.dec.wsgify(RequestClass=wsgi.Request)
+ @webob.dec.wsgify(RequestClass=nova.api.openstack.wsgi.Request)
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
@@ -289,8 +290,11 @@ class ServiceWrapper(wsgi.Controller):
try:
content_type = req.best_match_content_type()
- default_xmlns = self.get_default_xmlns(req)
- return self._serialize(result, content_type, default_xmlns)
+ serializer = {
+ 'application/xml': nova.api.openstack.wsgi.XMLDictSerializer(),
+ 'application/json': nova.api.openstack.wsgi.JSONDictSerializer(),
+ }[content_type]
+ return serializer.serialize(result)
except:
raise exception.Error("returned non-serializable type: %s"
% result)
@@ -320,7 +324,7 @@ class Limited(object):
def __init__(self, proxy):
self._proxy = proxy
- if not self.__doc__:
+ if not self.__doc__: # pylint: disable=E0203
self.__doc__ = proxy.__doc__
if not self._allowed:
self._allowed = []
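
[Editor's note] The ServiceWrapper change above drops the old wsgi.Controller serialization in favor of picking a serializer object keyed by the request's best-match content type. Below is a minimal, standalone sketch of that dispatch pattern; the serializer classes are simplified stand-ins for nova.api.openstack.wsgi's XMLDictSerializer/JSONDictSerializer, not the real implementations.

    # Simplified stand-ins for the real serializer classes; the actual
    # XMLDictSerializer builds a proper XML document with metadata and
    # namespace handling.
    import json

    class JSONDictSerializer(object):
        def serialize(self, data):
            return json.dumps(data)

    class XMLDictSerializer(object):
        def serialize(self, data):
            return '<result>%s</result>' % data

    def serialize_result(result, content_type):
        # Same shape as the dict lookup in ServiceWrapper.__call__: an
        # unknown content type raises KeyError, which the caller turns
        # into an error response.
        serializer = {
            'application/xml': XMLDictSerializer(),
            'application/json': JSONDictSerializer(),
        }[content_type]
        return serializer.serialize(result)

    print(serialize_result({'status': 'ok'}, 'application/json'))
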
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 1915d007d..890d57fe7 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -242,6 +242,7 @@ class Authorizer(wsgi.Middleware):
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
+ 'ImportPublicKey': ['all'],
'AuthorizeSecurityGroupIngress': ['netadmin'],
'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index ea94d9c1f..57d0a0339 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker.
"""
import base64
-import datetime
from nova import db
from nova import exception
@@ -305,7 +304,7 @@ class AdminController(object):
* Volume Count
"""
services = db.service_get_all(context, False)
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
hosts = []
rv = []
for host in [service['host'] for service in services]:
@@ -325,7 +324,3 @@ class AdminController(object):
rv.append(host_dict(host, compute, instances, volume, volumes,
now))
return {'hosts': rv}
-
- def describe_host(self, _context, name, **_kwargs):
- """Returns status info for single node."""
- return host_dict(db.host_get(name))
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 79cc3b3bf..e1c65ae40 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -23,7 +23,6 @@ datastore.
"""
import base64
-import datetime
import IPy
import os
import urllib
@@ -40,6 +39,7 @@ from nova import flags
from nova import ipv6
from nova import log as logging
from nova import network
+from nova import rpc
from nova import utils
from nova import volume
from nova.api.ec2 import ec2utils
@@ -137,6 +137,13 @@ class CloudController(object):
return services[0]['availability_zone']
return 'unknown zone'
+ def _get_image_state(self, image):
+ # NOTE(vish): fallback status if image_state isn't set
+ state = image.get('status')
+ if state == 'active':
+ state = 'available'
+ return image['properties'].get('image_state', state)
+
def get_metadata(self, address):
ctxt = context.get_admin_context()
instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address)
@@ -159,7 +166,7 @@ class CloudController(object):
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
- image_ec2_id = self.image_ec2_id(instance_ref['image_id'])
+ image_ec2_id = self.image_ec2_id(instance_ref['image_ref'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@@ -235,7 +242,7 @@ class CloudController(object):
'zoneState': 'available'}]}
services = db.service_get_all(context, False)
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
hosts = []
for host in [service['host'] for service in services]:
if not host in hosts:
@@ -595,7 +602,7 @@ class CloudController(object):
instance_id = ec2utils.ec2_id_to_id(ec2_id)
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
return {"InstanceId": ec2_id,
"Timestamp": now,
"output": base64.b64encode(output)}
@@ -774,13 +781,13 @@ class CloudController(object):
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
if not context.is_admin:
- if instance['image_id'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(FLAGS.vpn_image_id):
continue
i = {}
instance_id = instance['id']
ec2_id = ec2utils.id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
- i['imageId'] = self.image_ec2_id(instance['image_id'])
+ i['imageId'] = self.image_ec2_id(instance['image_ref'])
i['instanceState'] = {
'code': instance['state'],
'name': instance['state_description']}
@@ -866,8 +873,14 @@ class CloudController(object):
def allocate_address(self, context, **kwargs):
LOG.audit(_("Allocate address"), context=context)
- public_ip = self.network_api.allocate_floating_ip(context)
- return {'publicIp': public_ip}
+ try:
+ public_ip = self.network_api.allocate_floating_ip(context)
+ return {'publicIp': public_ip}
+ except rpc.RemoteError as ex:
+ if ex.exc_type == 'NoMoreAddresses':
+ raise exception.NoMoreFloatingIps()
+ else:
+ raise
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_("Release address %s"), public_ip, context=context)
@@ -896,10 +909,20 @@ class CloudController(object):
if kwargs.get('ramdisk_id'):
ramdisk = self._get_image(context, kwargs['ramdisk_id'])
kwargs['ramdisk_id'] = ramdisk['id']
+ image = self._get_image(context, kwargs['image_id'])
+
+ if image:
+ image_state = self._get_image_state(image)
+ else:
+ raise exception.ImageNotFound(image_id=kwargs['image_id'])
+
+ if image_state != 'available':
+ raise exception.ApiError(_('Image must be available'))
+
instances = self.compute_api.create(context,
instance_type=instance_types.get_instance_type_by_name(
kwargs.get('instance_type', None)),
- image_id=self._get_image(context, kwargs['image_id'])['id'],
+ image_href=self._get_image(context, kwargs['image_id'])['id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
kernel_id=kwargs.get('kernel_id'),
@@ -975,7 +998,12 @@ class CloudController(object):
def image_ec2_id(image_id, image_type='ami'):
"""Returns image ec2_id using id and three letter type."""
template = image_type + '-%08x'
- return ec2utils.id_to_ec2_id(int(image_id), template=template)
+ try:
+ return ec2utils.id_to_ec2_id(int(image_id), template=template)
+ except ValueError:
+ #TODO(wwolf): once we have ec2_id -> glance_id mapping
+ # in place, this wont be necessary
+ return "ami-00000000"
def _get_image(self, context, ec2_id):
try:
@@ -1006,11 +1034,8 @@ class CloudController(object):
get('image_location'), name)
else:
i['imageLocation'] = image['properties'].get('image_location')
- # NOTE(vish): fallback status if image_state isn't set
- state = image.get('status')
- if state == 'active':
- state = 'available'
- i['imageState'] = image['properties'].get('image_state', state)
+
+ i['imageState'] = self._get_image_state(image)
i['displayName'] = name
i['description'] = image.get('description')
display_mapping = {'aki': 'kernel',
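
[Editor's note] For context, the _get_image_state() helper factored out above maps the image service's 'active' status to the EC2-style 'available', unless the image carries an explicit image_state property. A standalone sketch of that fallback, using plain dicts in place of real image records:

    def get_image_state(image):
        # Same fallback logic as CloudController._get_image_state above.
        state = image.get('status')
        if state == 'active':
            state = 'available'
        return image['properties'].get('image_state', state)

    print(get_image_state({'status': 'active', 'properties': {}}))
    # -> available
    print(get_image_state({'status': 'active',
                           'properties': {'image_state': 'decrypting'}}))
    # -> decrypting
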
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 5b7f080ad..ddd9580d7 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -26,7 +26,7 @@ import webob.exc
from nova import flags
from nova import log as logging
-from nova import wsgi
+from nova import wsgi as base_wsgi
from nova.api.openstack import accounts
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
@@ -40,6 +40,7 @@ from nova.api.openstack import servers
from nova.api.openstack import server_metadata
from nova.api.openstack import shared_ip_groups
from nova.api.openstack import users
+from nova.api.openstack import wsgi
from nova.api.openstack import zones
@@ -50,7 +51,7 @@ flags.DEFINE_bool('allow_admin_api',
'When True, this API service will accept admin operations.')
-class FaultWrapper(wsgi.Middleware):
+class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
@@ -63,7 +64,7 @@ class FaultWrapper(wsgi.Middleware):
return faults.Fault(exc)
-class APIRouter(wsgi.Router):
+class APIRouter(base_wsgi.Router):
"""
Routes requests on the OpenStack API to the appropriate controller
and method.
@@ -80,7 +81,9 @@ class APIRouter(wsgi.Router):
self._setup_routes(mapper)
super(APIRouter, self).__init__(mapper)
- def _setup_routes(self, mapper):
+ def _setup_routes(self, mapper, version):
+ """Routes common to all versions."""
+
server_members = self.server_members
server_members['action'] = 'POST'
if FLAGS.allow_admin_api:
@@ -97,56 +100,65 @@ class APIRouter(wsgi.Router):
server_members['reset_network'] = 'POST'
server_members['inject_network_info'] = 'POST'
- mapper.resource("zone", "zones", controller=zones.Controller(),
- collection={'detail': 'GET', 'info': 'GET',
- 'select': 'GET'})
-
- mapper.resource("user", "users", controller=users.Controller(),
+ mapper.resource("user", "users",
+ controller=users.create_resource(),
collection={'detail': 'GET'})
mapper.resource("account", "accounts",
- controller=accounts.Controller(),
+ controller=accounts.create_resource(),
collection={'detail': 'GET'})
- mapper.resource("console", "consoles",
- controller=consoles.Controller(),
- parent_resource=dict(member_name='server',
- collection_name='servers'))
-
- super(APIRouter, self).__init__(mapper)
+ mapper.resource("zone", "zones",
+ controller=zones.create_resource(version),
+ collection={'detail': 'GET',
+ 'info': 'GET',
+ 'select': 'POST',
+ 'boot': 'POST'
+ })
+ mapper.resource("console", "consoles",
+ controller=consoles.create_resource(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
-class APIRouterV10(APIRouter):
- """Define routes specific to OpenStack API V1.0."""
-
- def _setup_routes(self, mapper):
- super(APIRouterV10, self)._setup_routes(mapper)
mapper.resource("server", "servers",
- controller=servers.ControllerV10(),
+ controller=servers.create_resource(version),
collection={'detail': 'GET'},
member=self.server_members)
mapper.resource("image", "images",
- controller=images.ControllerV10(),
+ controller=images.create_resource(version),
collection={'detail': 'GET'})
+ mapper.resource("limit", "limits",
+ controller=limits.create_resource(version))
+
mapper.resource("flavor", "flavors",
- controller=flavors.ControllerV10(),
+ controller=flavors.create_resource(version),
+ collection={'detail': 'GET'})
+
+ super(APIRouter, self).__init__(mapper)
+
+
+class APIRouterV10(APIRouter):
+ """Define routes specific to OpenStack API V1.0."""
+
+ def _setup_routes(self, mapper):
+ super(APIRouterV10, self)._setup_routes(mapper, '1.0')
+ mapper.resource("image", "images",
+ controller=images.create_resource('1.0'),
collection={'detail': 'GET'})
mapper.resource("shared_ip_group", "shared_ip_groups",
collection={'detail': 'GET'},
- controller=shared_ip_groups.Controller())
+ controller=shared_ip_groups.create_resource())
mapper.resource("backup_schedule", "backup_schedule",
- controller=backup_schedules.Controller(),
+ controller=backup_schedules.create_resource(),
parent_resource=dict(member_name='server',
collection_name='servers'))
- mapper.resource("limit", "limits",
- controller=limits.LimitsControllerV10())
-
- mapper.resource("ip", "ips", controller=ips.Controller(),
+ mapper.resource("ip", "ips", controller=ips.create_resource(),
collection=dict(public='GET', private='GET'),
parent_resource=dict(member_name='server',
collection_name='servers'))
@@ -156,29 +168,13 @@ class APIRouterV11(APIRouter):
"""Define routes specific to OpenStack API V1.1."""
def _setup_routes(self, mapper):
- super(APIRouterV11, self)._setup_routes(mapper)
- mapper.resource("server", "servers",
- controller=servers.ControllerV11(),
- collection={'detail': 'GET'},
- member=self.server_members)
-
- mapper.resource("image", "images",
- controller=images.ControllerV11(),
- collection={'detail': 'GET'})
-
+ super(APIRouterV11, self)._setup_routes(mapper, '1.1')
mapper.resource("image_meta", "meta",
- controller=image_metadata.Controller(),
+ controller=image_metadata.create_resource(),
parent_resource=dict(member_name='image',
collection_name='images'))
mapper.resource("server_meta", "meta",
- controller=server_metadata.Controller(),
+ controller=server_metadata.create_resource(),
parent_resource=dict(member_name='server',
collection_name='servers'))
-
- mapper.resource("flavor", "flavors",
- controller=flavors.ControllerV11(),
- collection={'detail': 'GET'})
-
- mapper.resource("limit", "limits",
- controller=limits.LimitsControllerV11())
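
[Editor's note] The router refactoring above moves the routes shared by both API versions into APIRouter._setup_routes(mapper, version) and has each versioned subclass pass its version string. A toy sketch of that shape follows; the mapper and the real resource factories are omitted, and the route names listed are illustrative only.

    class APIRouter(object):
        def __init__(self):
            self.routes = []
            self._setup_routes()

        def _setup_routes(self, version):
            # Routes common to all versions, created with the
            # version-aware resource factories.
            self.routes.append(('servers', version))
            self.routes.append(('images', version))

    class APIRouterV10(APIRouter):
        def _setup_routes(self):
            APIRouter._setup_routes(self, '1.0')

    class APIRouterV11(APIRouter):
        def _setup_routes(self):
            APIRouter._setup_routes(self, '1.1')
            # v1.1-only resources
            self.routes.append(('server_meta', '1.1'))

    print(APIRouterV11().routes)
    # -> [('servers', '1.1'), ('images', '1.1'), ('server_meta', '1.1')]
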
diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py
index 00fdd4540..0dcd37217 100644
--- a/nova/api/openstack/accounts.py
+++ b/nova/api/openstack/accounts.py
@@ -20,8 +20,9 @@ from nova import flags
from nova import log as logging
from nova.auth import manager
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')
@@ -34,12 +35,7 @@ def _translate_keys(account):
manager=account.project_manager_id)
-class Controller(common.OpenstackController):
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "account": ["id", "name", "description", "manager"]}}}
+class Controller(object):
def __init__(self):
self.manager = manager.AuthManager()
@@ -66,20 +62,33 @@ class Controller(common.OpenstackController):
self.manager.delete_project(id)
return {}
- def create(self, req):
+ def create(self, req, body):
"""We use update with create-or-update semantics
because the id comes from an external source"""
raise faults.Fault(webob.exc.HTTPNotImplemented())
- def update(self, req, id):
+ def update(self, req, id, body):
"""This is really create or update."""
self._check_admin(req.environ['nova.context'])
- env = self._deserialize(req.body, req.get_content_type())
- description = env['account'].get('description')
- manager = env['account'].get('manager')
+ description = body['account'].get('description')
+ manager = body['account'].get('manager')
try:
account = self.manager.get_project(id)
self.manager.modify_project(id, manager, description)
except exception.NotFound:
account = self.manager.create_project(id, manager, description)
return dict(account=_translate_keys(account))
+
+
+def create_resource():
+ metadata = {
+ "attributes": {
+ "account": ["id", "name", "description", "manager"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 6c6ee22a2..7c3e683d6 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -13,9 +13,8 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
-# under the License.import datetime
+# under the License.
-import datetime
import hashlib
import time
@@ -50,19 +49,22 @@ class AuthMiddleware(wsgi.Middleware):
if not self.has_authentication(req):
return self.authenticate(req)
user = self.get_user_by_authentication(req)
- accounts = self.auth.get_projects(user=user)
if not user:
token = req.headers["X-Auth-Token"]
msg = _("%(user)s could not be found with token '%(token)s'")
LOG.warn(msg % locals())
return faults.Fault(webob.exc.HTTPUnauthorized())
- if accounts:
- #we are punting on this til auth is settled,
- #and possibly til api v1.1 (mdragon)
- account = accounts[0]
- else:
- return faults.Fault(webob.exc.HTTPUnauthorized())
+ try:
+ account = req.headers["X-Auth-Project-Id"]
+ except KeyError:
+ # FIXME(usrleon): It needed only for compatibility
+ # while osapi clients don't use this header
+ accounts = self.auth.get_projects(user=user)
+ if accounts:
+ account = accounts[0]
+ else:
+ return faults.Fault(webob.exc.HTTPUnauthorized())
if not self.auth.is_admin(user) and \
not self.auth.is_project_member(user, account):
@@ -127,7 +129,7 @@ class AuthMiddleware(wsgi.Middleware):
except exception.NotFound:
return None
if token:
- delta = datetime.datetime.utcnow() - token['created_at']
+ delta = utils.utcnow() - token['created_at']
if delta.days >= 2:
self.db.auth_token_destroy(ctxt, token['token_hash'])
else:
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index 4bf744046..71a14d4ce 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -19,9 +19,8 @@ import time
from webob import exc
-from nova.api.openstack import common
from nova.api.openstack import faults
-import nova.image.service
+from nova.api.openstack import wsgi
def _translate_keys(inst):
@@ -29,14 +28,9 @@ def _translate_keys(inst):
return dict(backupSchedule=inst)
-class Controller(common.OpenstackController):
+class Controller(object):
""" The backup schedule API controller for the Openstack API """
- _serialization_metadata = {
- 'application/xml': {
- 'attributes': {
- 'backupSchedule': []}}}
-
def __init__(self):
pass
@@ -48,7 +42,7 @@ class Controller(common.OpenstackController):
""" Returns a single backup schedule for a given instance """
return faults.Fault(exc.HTTPNotImplemented())
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
""" No actual update method required, since the existing API allows
both create and update through a POST """
return faults.Fault(exc.HTTPNotImplemented())
@@ -56,3 +50,18 @@ class Controller(common.OpenstackController):
def delete(self, req, server_id, id):
""" Deletes an existing backup schedule """
return faults.Fault(exc.HTTPNotImplemented())
+
+
+def create_resource():
+ metadata = {
+ 'attributes': {
+ 'backupSchedule': [],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
+ metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 32cd689ca..4da7ec0ef 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -23,12 +23,9 @@ import webob
from nova import exception
from nova import flags
from nova import log as logging
-from nova import wsgi
LOG = logging.getLogger('nova.api.openstack.common')
-
-
FLAGS = flags.FLAGS
@@ -36,6 +33,37 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
+def get_pagination_params(request):
+ """Return marker, limit tuple from request.
+
+ :param request: `wsgi.Request` possibly containing 'marker' and 'limit'
+ GET variables. 'marker' is the id of the last element
+ the client has seen, and 'limit' is the maximum number
+ of items to return. If 'limit' is not specified, 0, or
+ > max_limit, we default to max_limit. Negative values
+ for either marker or limit will cause
+ exc.HTTPBadRequest() exceptions to be raised.
+
+ """
+ try:
+ marker = int(request.GET.get('marker', 0))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
+
+ try:
+ limit = int(request.GET.get('limit', 0))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
+
+ if limit < 0:
+ raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+
+ if marker < 0:
+ raise webob.exc.HTTPBadRequest(_('marker param must be positive'))
+
+ return(marker, limit)
+
+
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
"""
Return a slice of items according to requested offset and limit.
@@ -72,19 +100,10 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit):
def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
+ (marker, limit) = get_pagination_params(request)
- try:
- marker = int(request.GET.get('marker', 0))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
-
- try:
- limit = int(request.GET.get('limit', max_limit))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
-
- if limit < 0:
- raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+ if limit == 0:
+ limit = max_limit
limit = min(max_limit, limit)
start_index = 0
@@ -100,34 +119,6 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
return items[start_index:range_end]
-def get_image_id_from_image_hash(image_service, context, image_hash):
- """Given an Image ID Hash, return an objectstore Image ID.
-
- image_service - reference to objectstore compatible image service.
- context - security context for image service requests.
- image_hash - hash of the image ID.
- """
-
- # FIX(sandy): This is terribly inefficient. It pulls all images
- # from objectstore in order to find the match. ObjectStore
- # should have a numeric counterpart to the string ID.
- try:
- items = image_service.detail(context)
- except NotImplementedError:
- items = image_service.index(context)
- for image in items:
- image_id = image['id']
- try:
- if abs(hash(image_id)) == int(image_hash):
- return image_id
- except ValueError:
- msg = _("Requested image_id has wrong format: %s,"
- "should have numerical format") % image_id
- LOG.error(msg)
- raise Exception(msg)
- raise exception.ImageNotFound(image_id=image_hash)
-
-
def get_id_from_href(href):
"""Return the id portion of a url as an int.
@@ -146,9 +137,3 @@ def get_id_from_href(href):
except:
LOG.debug(_("Error extracting id from href: %s") % href)
raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
-
-
-class OpenstackController(wsgi.Controller):
- def get_default_xmlns(self, req):
- # Use V10 by default
- return XML_NS_V10
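
[Editor's note] The new get_pagination_params()/limited_by_marker() helpers above implement marker-based paging: 'marker' is the id of the last item the client saw, and 'limit' caps the page size, defaulting to max_limit when unset or 0. A simplified usage sketch follows; FakeRequest stands in for wsgi.Request, and the HTTPBadRequest handling of the real helpers is omitted.

    class FakeRequest(object):
        def __init__(self, params):
            self.GET = params

    def limited_by_marker(items, request, max_limit=1000):
        # Simplified version of the helper above: no bad-request errors
        # for negative parameters or an unknown marker.
        marker = int(request.GET.get('marker', 0))
        limit = int(request.GET.get('limit', 0)) or max_limit
        limit = min(max_limit, limit)
        start_index = 0
        if marker:
            for i, item in enumerate(items):
                if item['id'] == marker:
                    start_index = i + 1
                    break
        return items[start_index:start_index + limit]

    items = [{'id': i} for i in range(1, 6)]
    print(limited_by_marker(items, FakeRequest({'marker': '2', 'limit': '2'})))
    # -> [{'id': 3}, {'id': 4}]
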
diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py
index 1a77f25d7..bccf04d8f 100644
--- a/nova/api/openstack/consoles.py
+++ b/nova/api/openstack/consoles.py
@@ -19,8 +19,8 @@ from webob import exc
from nova import console
from nova import exception
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
def _translate_keys(cons):
@@ -43,17 +43,11 @@ def _translate_detail_keys(cons):
return dict(console=info)
-class Controller(common.OpenstackController):
- """The Consoles Controller for the Openstack API"""
-
- _serialization_metadata = {
- 'application/xml': {
- 'attributes': {
- 'console': []}}}
+class Controller(object):
+ """The Consoles controller for the Openstack API"""
def __init__(self):
self.console_api = console.API()
- super(Controller, self).__init__()
def index(self, req, server_id):
"""Returns a list of consoles for this instance"""
@@ -63,9 +57,8 @@ class Controller(common.OpenstackController):
return dict(consoles=[_translate_keys(console)
for console in consoles])
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
"""Creates a new console"""
- #info = self._deserialize(req.body, req.get_content_type())
self.console_api.create_console(
req.environ['nova.context'],
int(server_id))
@@ -94,3 +87,17 @@ class Controller(common.OpenstackController):
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
+
+
+def create_resource():
+ metadata = {
+ 'attributes': {
+ 'console': [],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py
index b42a1d89d..acb5eb280 100644
--- a/nova/api/openstack/contrib/__init__.py
+++ b/nova/api/openstack/contrib/__init__.py
@@ -13,7 +13,7 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
-# under the License.import datetime
+# under the License.
"""Contrib contains extensions that are shipped with nova.
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index b22bd2846..feabdce89 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -22,7 +22,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import volume
-from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import faults
@@ -64,7 +63,7 @@ def _translate_volume_summary_view(context, vol):
return d
-class VolumeController(wsgi.Controller):
+class VolumeController(object):
"""The Volumes API controller for the OpenStack API."""
_serialization_metadata = {
@@ -124,15 +123,14 @@ class VolumeController(wsgi.Controller):
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
- def create(self, req):
+ def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
- if not env:
+ if not body:
return faults.Fault(exc.HTTPUnprocessableEntity())
- vol = env['volume']
+ vol = body['volume']
size = vol['size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
new_volume = self.volume_api.create(context, size, None,
@@ -175,7 +173,7 @@ def _translate_attachment_summary_view(_context, vol):
return d
-class VolumeAttachmentController(wsgi.Controller):
+class VolumeAttachmentController(object):
"""The volume attachment API controller for the Openstack API.
A child resource of the server. Note that we use the volume id
@@ -219,17 +217,16 @@ class VolumeAttachmentController(wsgi.Controller):
return {'volumeAttachment': _translate_attachment_detail_view(context,
vol)}
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
- if not env:
+ if not body:
return faults.Fault(exc.HTTPUnprocessableEntity())
instance_id = server_id
- volume_id = env['volumeAttachment']['volumeId']
- device = env['volumeAttachment']['device']
+ volume_id = body['volumeAttachment']['volumeId']
+ device = body['volumeAttachment']['device']
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
@@ -259,7 +256,7 @@ class VolumeAttachmentController(wsgi.Controller):
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
- def update(self, _req, _server_id, _id):
+ def update(self, req, server_id, id, body):
"""Update a volume attachment. We don't currently support this."""
return faults.Fault(exc.HTTPBadRequest())
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
new file mode 100644
index 000000000..fbc6318ef
--- /dev/null
+++ b/nova/api/openstack/create_instance_helper.py
@@ -0,0 +1,346 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import re
+import webob
+
+from webob import exc
+from xml.dom import minidom
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+import nova.image
+from nova import quota
+from nova import utils
+
+from nova.compute import instance_types
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+from nova.auth import manager as auth_manager
+
+
+LOG = logging.getLogger('nova.api.openstack.create_instance_helper')
+FLAGS = flags.FLAGS
+
+
+class CreateFault(exception.NovaException):
+ message = _("Invalid parameters given to create_instance.")
+
+ def __init__(self, fault):
+ self.fault = fault
+ super(CreateFault, self).__init__()
+
+
+class CreateInstanceHelper(object):
+ """This is the base class for OS API Controllers that
+ are capable of creating instances (currently Servers and Zones).
+
+ Once we stabilize the Zones portion of the API we may be able
+ to move this code back into servers.py
+ """
+
+ def __init__(self, controller):
+ """We need the image service to create an instance."""
+ self.controller = controller
+ self._image_service = utils.import_object(FLAGS.image_service)
+ super(CreateInstanceHelper, self).__init__()
+
+ def create_instance(self, req, body, create_method):
+ """Creates a new server for the given user. The approach
+ used depends on the create_method. For example, the standard
+ POST /server call uses compute.api.create(), while
+ POST /zones/server uses compute.api.create_all_at_once().
+
+ The problem is, both approaches return different values (i.e.
+ [instance dicts] vs. reservation_id). So the handling of the
+ return type from this method is left to the caller.
+ """
+ if not body:
+ raise faults.Fault(exc.HTTPUnprocessableEntity())
+
+ context = req.environ['nova.context']
+
+ password = self.controller._get_server_admin_password(body['server'])
+
+ key_name = None
+ key_data = None
+ key_pairs = auth_manager.AuthManager.get_key_pairs(context)
+ if key_pairs:
+ key_pair = key_pairs[0]
+ key_name = key_pair['name']
+ key_data = key_pair['public_key']
+
+ image_href = self.controller._image_ref_from_req_data(body)
+ try:
+ image_service, image_id = nova.image.get_image_service(image_href)
+ kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
+ req, image_id)
+ images = set([str(x['id']) for x in image_service.index(context)])
+ assert str(image_id) in images
+ except Exception, e:
+ msg = _("Cannot find requested image %(image_href)s: %(e)s" %
+ locals())
+ raise faults.Fault(exc.HTTPBadRequest(msg))
+
+ personality = body['server'].get('personality')
+
+ injected_files = []
+ if personality:
+ injected_files = self._get_injected_files(personality)
+
+ flavor_id = self.controller._flavor_id_from_req_data(body)
+
+ if not 'name' in body['server']:
+ msg = _("Server name is not defined")
+ raise exc.HTTPBadRequest(msg)
+
+ zone_blob = body['server'].get('blob')
+ name = body['server']['name']
+ self._validate_server_name(name)
+ name = name.strip()
+
+ reservation_id = body['server'].get('reservation_id')
+
+ try:
+ inst_type = \
+ instance_types.get_instance_type_by_flavor_id(flavor_id)
+ extra_values = {
+ 'instance_type': inst_type,
+ 'image_ref': image_href,
+ 'password': password
+ }
+
+ return (extra_values,
+ create_method(context,
+ inst_type,
+ image_id,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
+ display_name=name,
+ display_description=name,
+ key_name=key_name,
+ key_data=key_data,
+ metadata=body['server'].get('metadata', {}),
+ injected_files=injected_files,
+ admin_password=password,
+ zone_blob=zone_blob,
+ reservation_id=reservation_id
+ )
+ )
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+ except exception.ImageNotFound as error:
+ msg = _("Can not find requested image")
+ raise faults.Fault(exc.HTTPBadRequest(msg))
+
+ # Let the caller deal with unhandled exceptions.
+
+ def _handle_quota_error(self, error):
+ """
+ Reraise quota errors as api-specific http exceptions
+ """
+ if error.code == "OnsetFileLimitExceeded":
+ expl = _("Personality file limit exceeded")
+ raise exc.HTTPBadRequest(explanation=expl)
+ if error.code == "OnsetFilePathLimitExceeded":
+ expl = _("Personality file path too long")
+ raise exc.HTTPBadRequest(explanation=expl)
+ if error.code == "OnsetFileContentLimitExceeded":
+ expl = _("Personality file content too long")
+ raise exc.HTTPBadRequest(explanation=expl)
+ # if the original error is okay, just reraise it
+ raise error
+
+ def _deserialize_create(self, request):
+ """
+ Deserialize a create request
+
+ Overrides normal behavior in the case of xml content
+ """
+ if request.content_type == "application/xml":
+ deserializer = ServerCreateRequestXMLDeserializer()
+ return deserializer.deserialize(request.body)
+ else:
+ return self._deserialize(request.body, request.get_content_type())
+
+ def _validate_server_name(self, value):
+ if not isinstance(value, basestring):
+ msg = _("Server name is not a string or unicode")
+ raise exc.HTTPBadRequest(msg)
+
+ if value.strip() == '':
+ msg = _("Server name is an empty string")
+ raise exc.HTTPBadRequest(msg)
+
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """Fetch an image from the ImageService, then if present, return the
+ associated kernel and ramdisk image IDs.
+ """
+ context = req.environ['nova.context']
+ image_meta = self._image_service.show(context, image_id)
+ # NOTE(sirp): extracted to a separate method to aid unit-testing, the
+ # new method doesn't need a request obj or an ImageService stub
+ kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
+ image_meta)
+ return kernel_id, ramdisk_id
+
+ @staticmethod
+ def _do_get_kernel_ramdisk_from_image(image_meta):
+ """Given an ImageService image_meta, return kernel and ramdisk image
+ ids if present.
+
+ This is only valid for `ami` style images.
+ """
+ image_id = image_meta['id']
+ if image_meta['status'] != 'active':
+ raise exception.ImageUnacceptable(image_id=image_id,
+ reason=_("status is not active"))
+
+ if image_meta.get('container_format') != 'ami':
+ return None, None
+
+ try:
+ kernel_id = image_meta['properties']['kernel_id']
+ except KeyError:
+ raise exception.KernelNotFoundForImage(image_id=image_id)
+
+ try:
+ ramdisk_id = image_meta['properties']['ramdisk_id']
+ except KeyError:
+ raise exception.RamdiskNotFoundForImage(image_id=image_id)
+
+ return kernel_id, ramdisk_id
+
+ def _get_injected_files(self, personality):
+ """
+ Create a list of injected files from the personality attribute
+
+ At this time, injected_files must be formatted as a list of
+ (file_path, file_content) pairs for compatibility with the
+ underlying compute service.
+ """
+ injected_files = []
+
+ for item in personality:
+ try:
+ path = item['path']
+ contents = item['contents']
+ except KeyError as key:
+ expl = _('Bad personality format: missing %s') % key
+ raise exc.HTTPBadRequest(explanation=expl)
+ except TypeError:
+ expl = _('Bad personality format')
+ raise exc.HTTPBadRequest(explanation=expl)
+ try:
+ contents = base64.b64decode(contents)
+ except TypeError:
+ expl = _('Personality content for %s cannot be decoded') % path
+ raise exc.HTTPBadRequest(explanation=expl)
+ injected_files.append((path, contents))
+ return injected_files
+
+ def _get_server_admin_password_old_style(self, server):
+ """ Determine the admin password for a server on creation """
+ return utils.generate_password(16)
+
+ def _get_server_admin_password_new_style(self, server):
+ """ Determine the admin password for a server on creation """
+ password = server.get('adminPass')
+
+ if password is None:
+ return utils.generate_password(16)
+ if not isinstance(password, basestring) or password == '':
+ msg = _("Invalid adminPass")
+ raise exc.HTTPBadRequest(msg)
+ return password
+
+
+class ServerXMLDeserializer(wsgi.XMLDeserializer):
+ """
+ Deserializer to handle xml-formatted server create requests.
+
+ Handles standard server attributes as well as optional metadata
+ and personality attributes
+ """
+
+ def create(self, string):
+ """Deserialize an xml-formatted server create request"""
+ dom = minidom.parseString(string)
+ server = self._extract_server(dom)
+ return {'server': server}
+
+ def _extract_server(self, node):
+ """Marshal the server attribute of a parsed request"""
+ server = {}
+ server_node = self._find_first_child_named(node, 'server')
+ for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
+ if server_node.getAttribute(attr):
+ server[attr] = server_node.getAttribute(attr)
+ metadata = self._extract_metadata(server_node)
+ if metadata is not None:
+ server["metadata"] = metadata
+ personality = self._extract_personality(server_node)
+ if personality is not None:
+ server["personality"] = personality
+ return server
+
+ def _extract_metadata(self, server_node):
+ """Marshal the metadata attribute of a parsed request"""
+ metadata_node = self._find_first_child_named(server_node, "metadata")
+ if metadata_node is None:
+ return None
+ metadata = {}
+ for meta_node in self._find_children_named(metadata_node, "meta"):
+ key = meta_node.getAttribute("key")
+ metadata[key] = self._extract_text(meta_node)
+ return metadata
+
+ def _extract_personality(self, server_node):
+ """Marshal the personality attribute of a parsed request"""
+ personality_node = \
+ self._find_first_child_named(server_node, "personality")
+ if personality_node is None:
+ return None
+ personality = []
+ for file_node in self._find_children_named(personality_node, "file"):
+ item = {}
+ if file_node.hasAttribute("path"):
+ item["path"] = file_node.getAttribute("path")
+ item["contents"] = self._extract_text(file_node)
+ personality.append(item)
+ return personality
+
+ def _find_first_child_named(self, parent, name):
+ """Search a nodes children for the first child with a given name"""
+ for node in parent.childNodes:
+ if node.nodeName == name:
+ return node
+ return None
+
+ def _find_children_named(self, parent, name):
+ """Return all of a nodes children who have the given name"""
+ for node in parent.childNodes:
+ if node.nodeName == name:
+ yield node
+
+ def _extract_text(self, node):
+ """Get the text field contained by the given node"""
+ if len(node.childNodes) == 1:
+ child = node.childNodes[0]
+ if child.nodeType == child.TEXT_NODE:
+ return child.nodeValue
+ return ""
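
[Editor's note] The personality handling in CreateInstanceHelper._get_injected_files() above turns the API's personality list into (path, contents) pairs with the contents base64-decoded. A standalone sketch, under the assumption that ValueError replaces the webob HTTPBadRequest responses used in the real code:

    import base64

    def get_injected_files(personality):
        injected_files = []
        for item in personality:
            try:
                path = item['path']
                contents = item['contents']
            except (KeyError, TypeError):
                raise ValueError('Bad personality format')
            try:
                contents = base64.b64decode(contents)
            except (TypeError, ValueError):
                # decode errors differ between Python versions, so
                # catch both; the real code catches TypeError only.
                raise ValueError('Personality content for %s cannot be '
                                 'decoded' % path)
            injected_files.append((path, contents))
        return injected_files

    print(get_injected_files([{'path': '/etc/motd',
                               'contents': base64.b64encode(b'hello')}]))
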
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 8e77b25fb..54e17e23d 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -27,9 +27,10 @@ import webob.exc
from nova import exception
from nova import flags
from nova import log as logging
-from nova import wsgi
+from nova import wsgi as base_wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
LOG = logging.getLogger('extensions')
@@ -115,28 +116,34 @@ class ExtensionDescriptor(object):
return request_exts
-class ActionExtensionController(common.OpenstackController):
-
+class ActionExtensionController(object):
def __init__(self, application):
-
self.application = application
self.action_handlers = {}
def add_action(self, action_name, handler):
self.action_handlers[action_name] = handler
- def action(self, req, id):
-
- input_dict = self._deserialize(req.body, req.get_content_type())
+ def action(self, req, id, body):
for action_name, handler in self.action_handlers.iteritems():
- if action_name in input_dict:
- return handler(input_dict, req, id)
+ if action_name in body:
+ return handler(body, req, id)
# no action handler found (bump to downstream application)
res = self.application
return res
-class RequestExtensionController(common.OpenstackController):
+class ActionExtensionResource(wsgi.Resource):
+
+ def __init__(self, application):
+ controller = ActionExtensionController(application)
+ wsgi.Resource.__init__(self, controller)
+
+ def add_action(self, action_name, handler):
+ self.controller.add_action(action_name, handler)
+
+
+class RequestExtensionController(object):
def __init__(self, application):
self.application = application
@@ -153,7 +160,17 @@ class RequestExtensionController(common.OpenstackController):
return res
-class ExtensionController(common.OpenstackController):
+class RequestExtensionResource(wsgi.Resource):
+
+ def __init__(self, application):
+ controller = RequestExtensionController(application)
+ wsgi.Resource.__init__(self, controller)
+
+ def add_handler(self, handler):
+ self.controller.add_handler(handler)
+
+
+class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
@@ -186,7 +203,7 @@ class ExtensionController(common.OpenstackController):
raise faults.Fault(webob.exc.HTTPNotFound())
-class ExtensionMiddleware(wsgi.Middleware):
+class ExtensionMiddleware(base_wsgi.Middleware):
"""Extensions middleware for WSGI."""
@classmethod
def factory(cls, global_config, **local_config):
@@ -195,43 +212,43 @@ class ExtensionMiddleware(wsgi.Middleware):
return cls(app, **local_config)
return _factory
- def _action_ext_controllers(self, application, ext_mgr, mapper):
- """Return a dict of ActionExtensionController-s by collection."""
- action_controllers = {}
+ def _action_ext_resources(self, application, ext_mgr, mapper):
+ """Return a dict of ActionExtensionResource-s by collection."""
+ action_resources = {}
for action in ext_mgr.get_actions():
- if not action.collection in action_controllers.keys():
- controller = ActionExtensionController(application)
+ if not action.collection in action_resources.keys():
+ resource = ActionExtensionResource(application)
mapper.connect("/%s/:(id)/action.:(format)" %
action.collection,
action='action',
- controller=controller,
+ controller=resource,
conditions=dict(method=['POST']))
mapper.connect("/%s/:(id)/action" % action.collection,
action='action',
- controller=controller,
+ controller=resource,
conditions=dict(method=['POST']))
- action_controllers[action.collection] = controller
+ action_resources[action.collection] = resource
- return action_controllers
+ return action_resources
- def _request_ext_controllers(self, application, ext_mgr, mapper):
- """Returns a dict of RequestExtensionController-s by collection."""
- request_ext_controllers = {}
+ def _request_ext_resources(self, application, ext_mgr, mapper):
+ """Returns a dict of RequestExtensionResource-s by collection."""
+ request_ext_resources = {}
for req_ext in ext_mgr.get_request_extensions():
- if not req_ext.key in request_ext_controllers.keys():
- controller = RequestExtensionController(application)
+ if not req_ext.key in request_ext_resources.keys():
+ resource = RequestExtensionResource(application)
mapper.connect(req_ext.url_route + '.:(format)',
action='process',
- controller=controller,
+ controller=resource,
conditions=req_ext.conditions)
mapper.connect(req_ext.url_route,
action='process',
- controller=controller,
+ controller=resource,
conditions=req_ext.conditions)
- request_ext_controllers[req_ext.key] = controller
+ request_ext_resources[req_ext.key] = resource
- return request_ext_controllers
+ return request_ext_resources
def __init__(self, application, ext_mgr=None):
@@ -246,22 +263,22 @@ class ExtensionMiddleware(wsgi.Middleware):
LOG.debug(_('Extended resource: %s'),
resource.collection)
mapper.resource(resource.collection, resource.collection,
- controller=resource.controller,
+ controller=wsgi.Resource(resource.controller),
collection=resource.collection_actions,
member=resource.member_actions,
parent_resource=resource.parent)
# extended actions
- action_controllers = self._action_ext_controllers(application, ext_mgr,
+ action_resources = self._action_ext_resources(application, ext_mgr,
mapper)
for action in ext_mgr.get_actions():
LOG.debug(_('Extended action: %s'), action.action_name)
- controller = action_controllers[action.collection]
- controller.add_action(action.action_name, action.handler)
+ resource = action_resources[action.collection]
+ resource.add_action(action.action_name, action.handler)
# extended requests
- req_controllers = self._request_ext_controllers(application, ext_mgr,
- mapper)
+ req_controllers = self._request_ext_resources(application, ext_mgr,
+ mapper)
for request_ext in ext_mgr.get_request_extensions():
LOG.debug(_('Extended request: %s'), request_ext.key)
controller = req_controllers[request_ext.key]
@@ -313,7 +330,7 @@ class ExtensionManager(object):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
- ExtensionController(self)))
+ ExtensionsResource(self)))
for alias, ext in self.extensions.iteritems():
try:
resources.extend(ext.get_resources())
@@ -410,7 +427,7 @@ class ExtensionManager(object):
class RequestExtension(object):
- """Extend requests and responses of core nova OpenStack API controllers.
+ """Extend requests and responses of core nova OpenStack API resources.
Provide a way to add data to responses and handle custom request data
that is sent to core nova OpenStack API controllers.
@@ -424,7 +441,7 @@ class RequestExtension(object):
class ActionExtension(object):
- """Add custom actions to core nova OpenStack API controllers."""
+ """Add custom actions to core nova OpenStack API resources."""
def __init__(self, collection, action_name, handler):
self.collection = collection
diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 87118ce19..b9a23c126 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -19,8 +19,7 @@
import webob.dec
import webob.exc
-from nova import wsgi
-from nova.api.openstack import common
+from nova.api.openstack import wsgi
class Fault(webob.exc.HTTPException):
@@ -55,13 +54,21 @@ class Fault(webob.exc.HTTPException):
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
+
# 'code' is an attribute on the fault tag itself
- metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
- default_xmlns = common.XML_NS_V10
- serializer = wsgi.Serializer(metadata, default_xmlns)
+ metadata = {'attributes': {fault_name: 'code'}}
+
content_type = req.best_match_content_type()
- self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
+
+ serializer = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=wsgi.XMLNS_V10),
+ 'application/json': wsgi.JSONDictSerializer(),
+ }[content_type]
+
+ self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
+
return self.wrapped_exc
@@ -70,14 +77,6 @@ class OverLimitFault(webob.exc.HTTPException):
Rate-limited request response.
"""
- _serialization_metadata = {
- "application/xml": {
- "attributes": {
- "overLimitFault": "code",
- },
- },
- }
-
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
@@ -97,8 +96,16 @@ class OverLimitFault(webob.exc.HTTPException):
Return the wrapped exception with a serialized body conforming to our
error format.
"""
- serializer = wsgi.Serializer(self._serialization_metadata)
content_type = request.best_match_content_type()
- content = serializer.serialize(self.content, content_type)
+ metadata = {"attributes": {"overLimitFault": "code"}}
+
+ serializer = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=wsgi.XMLNS_V10),
+ 'application/json': wsgi.JSONDictSerializer(),
+ }[content_type]
+
+ content = serializer.serialize(self.content)
self.wrapped_exc.body = content
+
return self.wrapped_exc
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index 4c5971cf6..a21ff6cb2 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -19,22 +19,13 @@ import webob
from nova import db
from nova import exception
-from nova.api.openstack import common
from nova.api.openstack import views
+from nova.api.openstack import wsgi
-class Controller(common.OpenstackController):
+class Controller(object):
"""Flavor controller for the OpenStack API."""
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "flavor": ["id", "name", "ram", "disk"],
- "link": ["rel", "type", "href"],
- }
- }
- }
-
def index(self, req):
"""Return all flavors in brief."""
items = self._get_flavors(req, is_detail=False)
@@ -71,14 +62,31 @@ class Controller(common.OpenstackController):
class ControllerV10(Controller):
+
def _get_view_builder(self, req):
return views.flavors.ViewBuilder()
class ControllerV11(Controller):
+
def _get_view_builder(self, req):
base_url = req.application_url
return views.flavors.ViewBuilderV11(base_url)
- def get_default_xmlns(self, req):
- return common.XML_NS_V11
+
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': ControllerV10,
+ '1.1': ControllerV11,
+ }[version]()
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns),
+ }
+
+ return wsgi.Resource(controller, serializers=serializers)
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index 1eccc0174..ebfe2bde9 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -18,22 +18,21 @@
from webob import exc
from nova import flags
+from nova import image
from nova import quota
from nova import utils
-from nova import wsgi
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
FLAGS = flags.FLAGS
-class Controller(common.OpenstackController):
+class Controller(object):
"""The image metadata API controller for the Openstack API"""
def __init__(self):
- self.image_service = utils.import_object(FLAGS.image_service)
- super(Controller, self).__init__()
+ self.image_service = image.get_default_image_service()
def _get_metadata(self, context, image_id, image=None):
if not image:
@@ -64,9 +63,8 @@ class Controller(common.OpenstackController):
else:
return faults.Fault(exc.HTTPNotFound())
- def create(self, req, image_id):
+ def create(self, req, image_id, body):
context = req.environ['nova.context']
- body = self._deserialize(req.body, req.get_content_type())
img = self.image_service.show(context, image_id)
metadata = self._get_metadata(context, image_id, img)
if 'metadata' in body:
@@ -77,9 +75,8 @@ class Controller(common.OpenstackController):
self.image_service.update(context, image_id, img, None)
return dict(metadata=metadata)
- def update(self, req, image_id, id):
+ def update(self, req, image_id, id, body):
context = req.environ['nova.context']
- body = self._deserialize(req.body, req.get_content_type())
if not id in body:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
@@ -104,3 +101,11 @@ class Controller(common.OpenstackController):
metadata.pop(id)
img['properties'] = metadata
self.image_service.update(context, image_id, img, None)
+
+
+def create_resource():
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
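
Actions such as create() and update() now take an already-deserialized body dict; the old self._deserialize(req.body, ...) calls are gone. A small sketch of what the new wsgi layer hands the controller for a JSON request, assuming this branch of nova is importable:

    from nova.api.openstack import wsgi

    body = wsgi.JSONDeserializer().deserialize('{"metadata": {"key1": "value1"}}')
    # body == {'metadata': {'key1': 'value1'}}; the Resource passes it to
    # Controller.create(req, image_id, body) as the new 'body' argument.
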
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 2e779da79..5ffd8e96a 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -18,11 +18,13 @@ import webob.exc
from nova import compute
from nova import exception
from nova import flags
+import nova.image
from nova import log
from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.api.openstack.views import images as images_view
+from nova.api.openstack import wsgi
LOG = log.getLogger('nova.api.openstack.images')
@@ -31,53 +33,19 @@ FLAGS = flags.FLAGS
SUPPORTED_FILTERS = ['name', 'status']
-class Controller(common.OpenstackController):
- """Base `wsgi.Controller` for retrieving/displaying images."""
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "image": ["id", "name", "updated", "created", "status",
- "serverId", "progress"],
- "link": ["rel", "type", "href"],
- },
- },
- }
+class Controller(object):
+ """Base controller for retrieving/displaying images."""
def __init__(self, image_service=None, compute_service=None):
"""Initialize new `ImageController`.
:param compute_service: `nova.compute.api:API`
:param image_service: `nova.image.service:BaseImageService`
- """
- _default_service = utils.import_object(flags.FLAGS.image_service)
-
- self._compute_service = compute_service or compute.API()
- self._image_service = image_service or _default_service
- def index(self, req):
- """Return an index listing of images available to the request.
-
- :param req: `wsgi.Request` object
"""
- context = req.environ['nova.context']
- filters = self._get_filters(req)
- images = self._image_service.index(context, filters)
- images = common.limited(images, req)
- builder = self.get_builder(req).build
- return dict(images=[builder(image, detail=False) for image in images])
-
- def detail(self, req):
- """Return a detailed index listing of images available to the request.
-
- :param req: `wsgi.Request` object.
- """
- context = req.environ['nova.context']
- filters = self._get_filters(req)
- images = self._image_service.detail(context, filters)
- images = common.limited(images, req)
- builder = self.get_builder(req).build
- return dict(images=[builder(image, detail=True) for image in images])
+ self._compute_service = compute_service or compute.API()
+ self._image_service = image_service or \
+ nova.image.get_default_image_service()
def _get_filters(self, req):
"""
@@ -97,22 +65,16 @@ class Controller(common.OpenstackController):
"""Return detailed information about a specific image.
:param req: `wsgi.Request` object
- :param id: Image identifier (integer)
+ :param id: Image identifier
"""
context = req.environ['nova.context']
try:
- image_id = int(id)
- except ValueError:
+ image = self._image_service.show(context, id)
+ except (exception.NotFound, exception.InvalidImageRef):
explanation = _("Image not found.")
raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation))
- try:
- image = self._image_service.show(context, image_id)
- except exception.NotFound:
- explanation = _("Image '%d' not found.") % (image_id)
- raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation))
-
return dict(image=self.get_builder(req).build(image, detail=True))
def delete(self, req, id):
@@ -121,26 +83,24 @@ class Controller(common.OpenstackController):
:param req: `wsgi.Request` object
:param id: Image identifier (integer)
"""
- image_id = id
context = req.environ['nova.context']
- self._image_service.delete(context, image_id)
+ self._image_service.delete(context, id)
return webob.exc.HTTPNoContent()
- def create(self, req):
+ def create(self, req, body):
"""Snapshot a server instance and save the image.
:param req: `wsgi.Request` object
"""
context = req.environ['nova.context']
content_type = req.get_content_type()
- image = self._deserialize(req.body, content_type)
- if not image:
+ if not body:
raise webob.exc.HTTPBadRequest()
try:
- server_id = image["image"]["serverId"]
- image_name = image["image"]["name"]
+ server_id = self._server_id_from_req_data(body)
+ image_name = body["image"]["name"]
except KeyError:
raise webob.exc.HTTPBadRequest()
@@ -151,6 +111,9 @@ class Controller(common.OpenstackController):
"""Indicates that you must use a Controller subclass."""
raise NotImplementedError
+ def _server_id_from_req_data(self, data):
+ raise NotImplementedError()
+
class ControllerV10(Controller):
"""Version 1.0 specific controller logic."""
@@ -160,6 +123,35 @@ class ControllerV10(Controller):
base_url = request.application_url
return images_view.ViewBuilderV10(base_url)
+ def index(self, req):
+ """Return an index listing of images available to the request.
+
+ :param req: `wsgi.Request` object
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ images = self._image_service.index(context, filters)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=False) for image in images])
+
+ def detail(self, req):
+ """Return a detailed index listing of images available to the request.
+
+ :param req: `wsgi.Request` object.
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ images = self._image_service.detail(context, filters)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=True) for image in images])
+
+ def _server_id_from_req_data(self, data):
+ return data['image']['serverId']
+
class ControllerV11(Controller):
"""Version 1.1 specific controller logic."""
@@ -169,5 +161,60 @@ class ControllerV11(Controller):
base_url = request.application_url
return images_view.ViewBuilderV11(base_url)
- def get_default_xmlns(self, req):
- return common.XML_NS_V11
+ def index(self, req):
+ """Return an index listing of images available to the request.
+
+ :param req: `wsgi.Request` object
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ (marker, limit) = common.get_pagination_params(req)
+ images = self._image_service.index(
+ context, filters=filters, marker=marker, limit=limit)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=False) for image in images])
+
+ def detail(self, req):
+ """Return a detailed index listing of images available to the request.
+
+ :param req: `wsgi.Request` object.
+
+ """
+ context = req.environ['nova.context']
+ filters = self._get_filters(req)
+ (marker, limit) = common.get_pagination_params(req)
+ images = self._image_service.detail(
+ context, filters=filters, marker=marker, limit=limit)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=True) for image in images])
+
+ def _server_id_from_req_data(self, data):
+ return data['image']['serverRef']
+
+
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': ControllerV10,
+ '1.1': ControllerV11,
+ }[version]()
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ metadata = {
+ "attributes": {
+ "image": ["id", "name", "updated", "created", "status",
+ "serverId", "progress", "serverRef"],
+ "link": ["rel", "type", "href"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
+ metadata=metadata),
+ }
+
+ return wsgi.Resource(controller, serializers=serializers)
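
The serverId/serverRef split is now isolated in _server_id_from_req_data(), so the shared create() path no longer cares which API version supplied the body. An illustrative sketch of the two request shapes (the id and href are made up):

    v10_body = {'image': {'serverId': 42, 'name': 'backup'}}
    v11_body = {'image': {'serverRef': 'http://localhost/v1.1/servers/42',
                          'name': 'backup'}}
    # ControllerV10._server_id_from_req_data(v10_body) -> 42
    # ControllerV11._server_id_from_req_data(v11_body) -> the serverRef href
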
diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py
index 778e9ba1a..abea71830 100644
--- a/nova/api/openstack/ips.py
+++ b/nova/api/openstack/ips.py
@@ -20,23 +20,14 @@ import time
from webob import exc
import nova
-import nova.api.openstack.views.addresses
-from nova.api.openstack import common
from nova.api.openstack import faults
+import nova.api.openstack.views.addresses
+from nova.api.openstack import wsgi
-class Controller(common.OpenstackController):
+class Controller(object):
"""The servers addresses API controller for the Openstack API."""
- _serialization_metadata = {
- 'application/xml': {
- 'list_collections': {
- 'public': {'item_name': 'ip', 'item_key': 'addr'},
- 'private': {'item_name': 'ip', 'item_key': 'addr'},
- },
- },
- }
-
def __init__(self):
self.compute_api = nova.compute.API()
self.builder = nova.api.openstack.views.addresses.ViewBuilderV10()
@@ -65,8 +56,24 @@ class Controller(common.OpenstackController):
def show(self, req, server_id, id):
return faults.Fault(exc.HTTPNotImplemented())
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
return faults.Fault(exc.HTTPNotImplemented())
+
+
+def create_resource():
+ metadata = {
+ 'list_collections': {
+ 'public': {'item_name': 'ip', 'item_key': 'addr'},
+ 'private': {'item_name': 'ip', 'item_key': 'addr'},
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=wsgi.XMLNS_V10),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
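
The list_collections metadata that used to hang off the controller now configures the XML serializer directly; it tells XMLDictSerializer to render each address list as <ip addr="..."/> elements. A rough sketch of the resulting output, assuming this branch of nova is importable and the v1.0 addresses dict shape:

    from nova.api.openstack import wsgi

    metadata = {'list_collections': {
        'public': {'item_name': 'ip', 'item_key': 'addr'},
        'private': {'item_name': 'ip', 'item_key': 'addr'},
    }}
    serializer = wsgi.XMLDictSerializer(metadata=metadata, xmlns=wsgi.XMLNS_V10)
    xml = serializer.serialize(
        {'addresses': {'public': ['10.0.0.2'], 'private': ['192.168.0.2']}})
    # roughly: <addresses xmlns="..."><public><ip addr="10.0.0.2"/></public>...
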
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
index bd0250a7f..fede96e33 100644
--- a/nova/api/openstack/limits.py
+++ b/nova/api/openstack/limits.py
@@ -11,7 +11,7 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
-# under the License.import datetime
+# under the License.
"""
Module dedicated to functions/classes dealing with rate limiting requests.
@@ -31,10 +31,12 @@ from collections import defaultdict
from webob.dec import wsgify
from nova import quota
+from nova import wsgi as base_wsgi
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.api.openstack.views import limits as limits_views
+from nova.api.openstack import wsgi
# Convenience constants for the limits dictionary passed to Limiter().
@@ -44,23 +46,11 @@ PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
-class LimitsController(common.OpenstackController):
+class LimitsController(object):
"""
Controller for accessing limits in the OpenStack API.
"""
- _serialization_metadata = {
- "application/xml": {
- "attributes": {
- "limit": ["verb", "URI", "uri", "regex", "value", "unit",
- "resetTime", "next-available", "remaining", "name"],
- },
- "plurals": {
- "rate": "limit",
- },
- },
- }
-
def index(self, req):
"""
Return all global and rate limit information.
@@ -86,6 +76,35 @@ class LimitsControllerV11(LimitsController):
return limits_views.ViewBuilderV11()
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': LimitsControllerV10,
+ '1.1': LimitsControllerV11,
+ }[version]()
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ metadata = {
+ "attributes": {
+ "limit": ["verb", "URI", "uri", "regex", "value", "unit",
+ "resetTime", "next-available", "remaining", "name"],
+ },
+ "plurals": {
+ "rate": "limit",
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
+ metadata=metadata),
+ }
+
+ return wsgi.Resource(controller, serializers=serializers)
+
+
class Limit(object):
"""
Stores information about a limit for HTTP requests.
@@ -197,7 +216,7 @@ DEFAULT_LIMITS = [
]
-class RateLimitingMiddleware(wsgi.Middleware):
+class RateLimitingMiddleware(base_wsgi.Middleware):
"""
Rate-limits requests passing through this middleware. All limit information
is stored in memory for this implementation.
@@ -211,7 +230,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
@param application: WSGI application to wrap
@param limits: List of dictionaries describing limits
"""
- wsgi.Middleware.__init__(self, application)
+ base_wsgi.Middleware.__init__(self, application)
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@wsgify(RequestClass=wsgi.Request)
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index 88ffc3246..9ede548c2 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -13,7 +13,7 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
-# under the License.import datetime
+# under the License.
"""Rate limiting of arbitrary actions."""
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index fd64ee4fb..57666f6b7 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -19,12 +19,11 @@ from webob import exc
from nova import compute
from nova import quota
-from nova import wsgi
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
-class Controller(common.OpenstackController):
+class Controller(object):
""" The server metadata API controller for the Openstack API """
def __init__(self):
@@ -38,26 +37,31 @@ class Controller(common.OpenstackController):
meta_dict[key] = value
return dict(metadata=meta_dict)
+ def _check_body(self, body):
+ if body == None or body == "":
+ expl = _('No Request Body')
+ raise exc.HTTPBadRequest(explanation=expl)
+
def index(self, req, server_id):
""" Returns the list of metadata for a given instance """
context = req.environ['nova.context']
return self._get_metadata(context, server_id)
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
+ self._check_body(body)
context = req.environ['nova.context']
- data = self._deserialize(req.body, req.get_content_type())
- metadata = data.get('metadata')
+ metadata = body.get('metadata')
try:
self.compute_api.update_or_create_instance_metadata(context,
server_id,
metadata)
except quota.QuotaError as error:
self._handle_quota_error(error)
- return req.body
+ return body
- def update(self, req, server_id, id):
+ def update(self, req, server_id, id, body):
+ self._check_body(body)
context = req.environ['nova.context']
- body = self._deserialize(req.body, req.get_content_type())
if not id in body:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
@@ -71,7 +75,7 @@ class Controller(common.OpenstackController):
except quota.QuotaError as error:
self._handle_quota_error(error)
- return req.body
+ return body
def show(self, req, server_id, id):
""" Return a single metadata item """
@@ -92,3 +96,11 @@ class Controller(common.OpenstackController):
if error.code == "MetadataLimitExceeded":
raise exc.HTTPBadRequest(explanation=error.message)
raise error
+
+
+def create_resource():
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 8e191c232..798fdd7f7 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -17,22 +17,20 @@ import base64
import traceback
from webob import exc
-from xml.dom import minidom
from nova import compute
from nova import exception
from nova import flags
from nova import log as logging
-from nova import quota
from nova import utils
from nova.api.openstack import common
+from nova.api.openstack import create_instance_helper as helper
from nova.api.openstack import faults
import nova.api.openstack.views.addresses
import nova.api.openstack.views.flavors
import nova.api.openstack.views.images
import nova.api.openstack.views.servers
-from nova.auth import manager as auth_manager
-from nova.compute import instance_types
+from nova.api.openstack import wsgi
import nova.api.openstack
from nova.scheduler import api as scheduler_api
@@ -41,45 +39,28 @@ LOG = logging.getLogger('nova.api.openstack.servers')
FLAGS = flags.FLAGS
-class Controller(common.OpenstackController):
+class Controller(object):
""" The Server API controller for the OpenStack API """
- _serialization_metadata = {
- "application/xml": {
- "attributes": {
- "server": ["id", "imageId", "name", "flavorId", "hostId",
- "status", "progress", "adminPass", "flavorRef",
- "imageRef"],
- "link": ["rel", "type", "href"],
- },
- "dict_collections": {
- "metadata": {"item_name": "meta", "item_key": "key"},
- },
- "list_collections": {
- "public": {"item_name": "ip", "item_key": "addr"},
- "private": {"item_name": "ip", "item_key": "addr"},
- },
- },
- }
-
def __init__(self):
self.compute_api = compute.API()
- self._image_service = utils.import_object(FLAGS.image_service)
- super(Controller, self).__init__()
+ self.helper = helper.CreateInstanceHelper(self)
def index(self, req):
""" Returns a list of server names and ids for a given user """
- return self._items(req, is_detail=False)
+ try:
+ servers = self._items(req, is_detail=False)
+ except exception.Invalid as err:
+ return exc.HTTPBadRequest(str(err))
+ return servers
def detail(self, req):
""" Returns a list of server details for a given user """
- return self._items(req, is_detail=True)
-
- def _image_id_from_req_data(self, data):
- raise NotImplementedError()
-
- def _flavor_id_from_req_data(self, data):
- raise NotImplementedError()
+ try:
+ servers = self._items(req, is_detail=True)
+ except exception.Invalid as err:
+ return exc.HTTPBadRequest(str(err))
+ return servers
def _get_view_builder(self, req):
raise NotImplementedError()
@@ -95,7 +76,10 @@ class Controller(common.OpenstackController):
builder - the response model builder
"""
- instance_list = self.compute_api.get_all(req.environ['nova.context'])
+ reservation_id = req.str_GET.get('reservation_id')
+ instance_list = self.compute_api.get_all(
+ req.environ['nova.context'],
+ reservation_id=reservation_id)
limited_list = self._limit_items(instance_list, req)
builder = self._get_view_builder(req)
servers = [builder.build(inst, is_detail)['server']
@@ -122,156 +106,45 @@ class Controller(common.OpenstackController):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def create(self, req):
+ def create(self, req, body):
""" Creates a new server for a given user """
- env = self._deserialize_create(req)
- if not env:
- return faults.Fault(exc.HTTPUnprocessableEntity())
-
- context = req.environ['nova.context']
-
- password = self._get_server_admin_password(env['server'])
-
- key_name = None
- key_data = None
- key_pairs = auth_manager.AuthManager.get_key_pairs(context)
- if key_pairs:
- key_pair = key_pairs[0]
- key_name = key_pair['name']
- key_data = key_pair['public_key']
-
- requested_image_id = self._image_id_from_req_data(env)
+ extra_values = None
+ result = None
try:
- image_id = common.get_image_id_from_image_hash(self._image_service,
- context, requested_image_id)
- except:
- msg = _("Can not find requested image")
- return faults.Fault(exc.HTTPBadRequest(msg))
-
- kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
- req, image_id)
-
- personality = env['server'].get('personality')
- injected_files = []
- if personality:
- injected_files = self._get_injected_files(personality)
-
- flavor_id = self._flavor_id_from_req_data(env)
-
- if not 'name' in env['server']:
- msg = _("Server name is not defined")
- return exc.HTTPBadRequest(msg)
+ extra_values, result = self.helper.create_instance(
+ req, body, self.compute_api.create)
+ except faults.Fault, f:
+ return f
- name = env['server']['name']
- self._validate_server_name(name)
- name = name.strip()
+ instances = result
- try:
- inst_type = \
- instance_types.get_instance_type_by_flavor_id(flavor_id)
- (inst,) = self.compute_api.create(
- context,
- inst_type,
- image_id,
- kernel_id=kernel_id,
- ramdisk_id=ramdisk_id,
- display_name=name,
- display_description=name,
- key_name=key_name,
- key_data=key_data,
- metadata=env['server'].get('metadata', {}),
- injected_files=injected_files,
- admin_password=password)
- except quota.QuotaError as error:
- self._handle_quota_error(error)
-
- inst['instance_type'] = inst_type
- inst['image_id'] = requested_image_id
+ (inst, ) = instances
+ for key in ['instance_type', 'image_ref']:
+ inst[key] = extra_values[key]
builder = self._get_view_builder(req)
server = builder.build(inst, is_detail=True)
- server['server']['adminPass'] = password
+ server['server']['adminPass'] = extra_values['password']
return server
- def _deserialize_create(self, request):
- """
- Deserialize a create request
-
- Overrides normal behavior in the case of xml content
- """
- if request.content_type == "application/xml":
- deserializer = ServerCreateRequestXMLDeserializer()
- return deserializer.deserialize(request.body)
- else:
- return self._deserialize(request.body, request.get_content_type())
-
- def _get_injected_files(self, personality):
- """
- Create a list of injected files from the personality attribute
-
- At this time, injected_files must be formatted as a list of
- (file_path, file_content) pairs for compatibility with the
- underlying compute service.
- """
- injected_files = []
-
- for item in personality:
- try:
- path = item['path']
- contents = item['contents']
- except KeyError as key:
- expl = _('Bad personality format: missing %s') % key
- raise exc.HTTPBadRequest(explanation=expl)
- except TypeError:
- expl = _('Bad personality format')
- raise exc.HTTPBadRequest(explanation=expl)
- try:
- contents = base64.b64decode(contents)
- except TypeError:
- expl = _('Personality content for %s cannot be decoded') % path
- raise exc.HTTPBadRequest(explanation=expl)
- injected_files.append((path, contents))
- return injected_files
-
- def _handle_quota_error(self, error):
- """
- Reraise quota errors as api-specific http exceptions
- """
- if error.code == "OnsetFileLimitExceeded":
- expl = _("Personality file limit exceeded")
- raise exc.HTTPBadRequest(explanation=expl)
- if error.code == "OnsetFilePathLimitExceeded":
- expl = _("Personality file path too long")
- raise exc.HTTPBadRequest(explanation=expl)
- if error.code == "OnsetFileContentLimitExceeded":
- expl = _("Personality file content too long")
- raise exc.HTTPBadRequest(explanation=expl)
- # if the original error is okay, just reraise it
- raise error
-
- def _get_server_admin_password(self, server):
- """ Determine the admin password for a server on creation """
- return utils.generate_password(16)
-
@scheduler_api.redirect_handler
- def update(self, req, id):
+ def update(self, req, id, body):
""" Updates the server name or password """
if len(req.body) == 0:
raise exc.HTTPUnprocessableEntity()
- inst_dict = self._deserialize(req.body, req.get_content_type())
- if not inst_dict:
+ if not body:
return faults.Fault(exc.HTTPUnprocessableEntity())
ctxt = req.environ['nova.context']
update_dict = {}
- if 'name' in inst_dict['server']:
- name = inst_dict['server']['name']
- self._validate_server_name(name)
+ if 'name' in body['server']:
+ name = body['server']['name']
+ self.helper._validate_server_name(name)
update_dict['display_name'] = name.strip()
- self._parse_update(ctxt, id, inst_dict, update_dict)
+ self._parse_update(ctxt, id, body, update_dict)
try:
self.compute_api.update(ctxt, id, **update_dict)
@@ -280,20 +153,11 @@ class Controller(common.OpenstackController):
return exc.HTTPNoContent()
- def _validate_server_name(self, value):
- if not isinstance(value, basestring):
- msg = _("Server name is not a string or unicode")
- raise exc.HTTPBadRequest(msg)
-
- if value.strip() == '':
- msg = _("Server name is an empty string")
- raise exc.HTTPBadRequest(msg)
-
def _parse_update(self, context, id, inst_dict, update_dict):
pass
@scheduler_api.redirect_handler
- def action(self, req, id):
+ def action(self, req, id, body):
"""Multi-purpose method used to reboot, rebuild, or
resize a server"""
@@ -306,10 +170,9 @@ class Controller(common.OpenstackController):
'rebuild': self._action_rebuild,
}
- input_dict = self._deserialize(req.body, req.get_content_type())
for key in actions.keys():
- if key in input_dict:
- return actions[key](input_dict, req, id)
+ if key in body:
+ return actions[key](body, req, id)
return faults.Fault(exc.HTTPNotImplemented())
def _action_change_password(self, input_dict, req, id):
@@ -332,19 +195,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
def _action_resize(self, input_dict, req, id):
- """ Resizes a given instance to the flavor size requested """
- try:
- if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
- flavor_id = input_dict['resize']['flavorId']
- self.compute_api.resize(req.environ['nova.context'], id,
- flavor_id)
- else:
- LOG.exception(_("Missing arguments for resize"))
- return faults.Fault(exc.HTTPUnprocessableEntity())
- except Exception, e:
- LOG.exception(_("Error in resize %s"), e)
- return faults.Fault(exc.HTTPBadRequest())
- return exc.HTTPAccepted()
+ return exc.HTTPNotImplemented()
def _action_reboot(self, input_dict, req, id):
if 'reboot' in input_dict and 'type' in input_dict['reboot']:
@@ -409,7 +260,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def reset_network(self, req, id):
+ def reset_network(self, req, id, body):
"""
Reset networking on an instance (admin only).
@@ -424,7 +275,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def inject_network_info(self, req, id):
+ def inject_network_info(self, req, id, body):
"""
Inject network info for an instance (admin only).
@@ -439,7 +290,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def pause(self, req, id):
+ def pause(self, req, id, body):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
try:
@@ -451,7 +302,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def unpause(self, req, id):
+ def unpause(self, req, id, body):
""" Permit Admins to Unpause the server. """
ctxt = req.environ['nova.context']
try:
@@ -463,7 +314,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def suspend(self, req, id):
+ def suspend(self, req, id, body):
"""permit admins to suspend the server"""
context = req.environ['nova.context']
try:
@@ -475,7 +326,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def resume(self, req, id):
+ def resume(self, req, id, body):
"""permit admins to resume the server from suspend"""
context = req.environ['nova.context']
try:
@@ -550,48 +401,10 @@ class Controller(common.OpenstackController):
error=item.error))
return dict(actions=actions)
- def _get_kernel_ramdisk_from_image(self, req, image_id):
- """Fetch an image from the ImageService, then if present, return the
- associated kernel and ramdisk image IDs.
- """
- context = req.environ['nova.context']
- image_meta = self._image_service.show(context, image_id)
- # NOTE(sirp): extracted to a separate method to aid unit-testing, the
- # new method doesn't need a request obj or an ImageService stub
- kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
- image_meta)
- return kernel_id, ramdisk_id
-
- @staticmethod
- def _do_get_kernel_ramdisk_from_image(image_meta):
- """Given an ImageService image_meta, return kernel and ramdisk image
- ids if present.
-
- This is only valid for `ami` style images.
- """
- image_id = image_meta['id']
- if image_meta['status'] != 'active':
- raise exception.ImageUnacceptable(image_id=image_id,
- reason=_("status is not active"))
-
- if image_meta.get('container_format') != 'ami':
- return None, None
-
- try:
- kernel_id = image_meta['properties']['kernel_id']
- except KeyError:
- raise exception.KernelNotFoundForImage(image_id=image_id)
-
- try:
- ramdisk_id = image_meta['properties']['ramdisk_id']
- except KeyError:
- raise exception.RamdiskNotFoundForImage(image_id=image_id)
-
- return kernel_id, ramdisk_id
-
class ControllerV10(Controller):
- def _image_id_from_req_data(self, data):
+
+ def _image_ref_from_req_data(self, data):
return data['server']['imageId']
def _flavor_id_from_req_data(self, data):
@@ -610,6 +423,21 @@ class ControllerV10(Controller):
self.compute_api.set_admin_password(context, server_id,
inst_dict['server']['adminPass'])
+ def _action_resize(self, input_dict, req, id):
+ """ Resizes a given instance to the flavor size requested """
+ try:
+ if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
+ flavor_id = input_dict['resize']['flavorId']
+ self.compute_api.resize(req.environ['nova.context'], id,
+ flavor_id)
+ else:
+ LOG.exception(_("Missing 'flavorId' argument for resize"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ except Exception, e:
+ LOG.exception(_("Error in resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
+
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
instance_id = int(instance_id)
@@ -632,11 +460,14 @@ class ControllerV10(Controller):
response.empty_body = True
return response
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ return self.helper._get_server_admin_password_old_style(server)
+
class ControllerV11(Controller):
- def _image_id_from_req_data(self, data):
- href = data['server']['imageRef']
- return common.get_id_from_href(href)
+ def _image_ref_from_req_data(self, data):
+ return data['server']['imageRef']
def _flavor_id_from_req_data(self, data):
href = data['server']['flavorRef']
@@ -695,18 +526,33 @@ class ControllerV11(Controller):
LOG.info(msg)
raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
+ def _action_resize(self, input_dict, req, id):
+ """ Resizes a given instance to the flavor size requested """
+ try:
+ if 'resize' in input_dict and 'flavorRef' in input_dict['resize']:
+ flavor_ref = input_dict['resize']['flavorRef']
+ flavor_id = common.get_id_from_href(flavor_ref)
+ self.compute_api.resize(req.environ['nova.context'], id,
+ flavor_id)
+ else:
+ LOG.exception(_("Missing 'flavorRef' argument for resize"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ except Exception, e:
+ LOG.exception(_("Error in resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
+
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
instance_id = int(instance_id)
try:
- image_ref = info["rebuild"]["imageRef"]
+ image_href = info["rebuild"]["imageRef"]
except (KeyError, TypeError):
msg = _("Could not parse imageRef from request.")
LOG.debug(msg)
return faults.Fault(exc.HTTPBadRequest(explanation=msg))
- image_id = common.get_id_from_href(image_ref)
personalities = info["rebuild"].get("personality", [])
metadata = info["rebuild"].get("metadata")
name = info["rebuild"].get("name")
@@ -716,7 +562,7 @@ class ControllerV11(Controller):
self._decode_personalities(personalities)
try:
- self.compute_api.rebuild(context, instance_id, image_id, name,
+ self.compute_api.rebuild(context, instance_id, image_href, name,
metadata, personalities)
except exception.BuildInProgress:
msg = _("Instance %d is currently being rebuilt.") % instance_id
@@ -727,92 +573,49 @@ class ControllerV11(Controller):
response.empty_body = True
return response
+ def get_default_xmlns(self, req):
+ return common.XML_NS_V11
+
def _get_server_admin_password(self, server):
""" Determine the admin password for a server on creation """
- password = server.get('adminPass')
- if password is None:
- return utils.generate_password(16)
- if not isinstance(password, basestring) or password == '':
- msg = _("Invalid adminPass")
- raise exc.HTTPBadRequest(msg)
- return password
+ return self.helper._get_server_admin_password_new_style(server)
- def get_default_xmlns(self, req):
- return common.XML_NS_V11
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': ControllerV10,
+ '1.1': ControllerV11,
+ }[version]()
-class ServerCreateRequestXMLDeserializer(object):
- """
- Deserializer to handle xml-formatted server create requests.
-
- Handles standard server attributes as well as optional metadata
- and personality attributes
- """
-
- def deserialize(self, string):
- """Deserialize an xml-formatted server create request"""
- dom = minidom.parseString(string)
- server = self._extract_server(dom)
- return {'server': server}
-
- def _extract_server(self, node):
- """Marshal the server attribute of a parsed request"""
- server = {}
- server_node = self._find_first_child_named(node, 'server')
- for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]:
- if server_node.getAttribute(attr):
- server[attr] = server_node.getAttribute(attr)
- metadata = self._extract_metadata(server_node)
- if metadata is not None:
- server["metadata"] = metadata
- personality = self._extract_personality(server_node)
- if personality is not None:
- server["personality"] = personality
- return server
+ metadata = {
+ "attributes": {
+ "server": ["id", "imageId", "name", "flavorId", "hostId",
+ "status", "progress", "adminPass", "flavorRef",
+ "imageRef"],
+ "link": ["rel", "type", "href"],
+ },
+ "dict_collections": {
+ "metadata": {"item_name": "meta", "item_key": "key"},
+ },
+ "list_collections": {
+ "public": {"item_name": "ip", "item_key": "addr"},
+ "private": {"item_name": "ip", "item_key": "addr"},
+ },
+ }
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=xmlns),
+ }
+
+ deserializers = {
+ 'application/xml': helper.ServerXMLDeserializer(),
+ }
- def _extract_metadata(self, server_node):
- """Marshal the metadata attribute of a parsed request"""
- metadata_node = self._find_first_child_named(server_node, "metadata")
- if metadata_node is None:
- return None
- metadata = {}
- for meta_node in self._find_children_named(metadata_node, "meta"):
- key = meta_node.getAttribute("key")
- metadata[key] = self._extract_text(meta_node)
- return metadata
-
- def _extract_personality(self, server_node):
- """Marshal the personality attribute of a parsed request"""
- personality_node = \
- self._find_first_child_named(server_node, "personality")
- if personality_node is None:
- return None
- personality = []
- for file_node in self._find_children_named(personality_node, "file"):
- item = {}
- if file_node.hasAttribute("path"):
- item["path"] = file_node.getAttribute("path")
- item["contents"] = self._extract_text(file_node)
- personality.append(item)
- return personality
-
- def _find_first_child_named(self, parent, name):
- """Search a nodes children for the first child with a given name"""
- for node in parent.childNodes:
- if node.nodeName == name:
- return node
- return None
-
- def _find_children_named(self, parent, name):
- """Return all of a nodes children who have the given name"""
- for node in parent.childNodes:
- if node.nodeName == name:
- yield node
-
- def _extract_text(self, node):
- """Get the text field contained by the given node"""
- if len(node.childNodes) == 1:
- child = node.childNodes[0]
- if child.nodeType == child.TEXT_NODE:
- return child.nodeValue
- return ""
+ return wsgi.Resource(controller, serializers=serializers,
+ deserializers=deserializers)
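
Create-request handling has moved out of the controller: the XML deserializer now comes from create_instance_helper.ServerXMLDeserializer and is wired in through the deserializers mapping, while create()/update()/action() receive the parsed body dict. A rough usage sketch, assuming this branch of nova is importable:

    from nova.api.openstack import servers

    # A WSGI app bundling the version-specific controller, the XML/JSON
    # serializers and the XML create-request deserializer.
    servers_resource = servers.create_resource(version='1.1')
    # An XML POST body is parsed by helper.ServerXMLDeserializer before
    # ControllerV11.create(req, body) ever sees it.
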
diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py
index 996db3648..4f11f8dfb 100644
--- a/nova/api/openstack/shared_ip_groups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -17,29 +17,13 @@
from webob import exc
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
-def _translate_keys(inst):
- """ Coerces a shared IP group instance into proper dictionary format """
- return dict(sharedIpGroup=inst)
-
-
-def _translate_detail_keys(inst):
- """ Coerces a shared IP group instance into proper dictionary format with
- correctly mapped attributes """
- return dict(sharedIpGroups=inst)
-
-
-class Controller(common.OpenstackController):
+class Controller(object):
""" The Shared IP Groups Controller for the Openstack API """
- _serialization_metadata = {
- 'application/xml': {
- 'attributes': {
- 'sharedIpGroup': []}}}
-
def index(self, req):
""" Returns a list of Shared IP Groups for the user """
raise faults.Fault(exc.HTTPNotImplemented())
@@ -48,7 +32,7 @@ class Controller(common.OpenstackController):
""" Shows in-depth information on a specific Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
- def update(self, req, id):
+ def update(self, req, id, body):
""" You can't update a Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
@@ -60,6 +44,10 @@ class Controller(common.OpenstackController):
""" Returns a complete list of Shared IP Groups """
raise faults.Fault(exc.HTTPNotImplemented())
- def create(self, req):
+ def create(self, req, body):
""" Creates a new Shared IP group """
raise faults.Fault(exc.HTTPNotImplemented())
+
+
+def create_resource():
+ return wsgi.Resource(Controller())
diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py
index 7ae4c3232..50975fc1f 100644
--- a/nova/api/openstack/users.py
+++ b/nova/api/openstack/users.py
@@ -20,8 +20,10 @@ from nova import flags
from nova import log as logging
from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
from nova.auth import manager
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')
@@ -34,12 +36,7 @@ def _translate_keys(user):
admin=user.admin)
-class Controller(common.OpenstackController):
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "user": ["id", "name", "access", "secret", "admin"]}}}
+class Controller(object):
def __init__(self):
self.manager = manager.AuthManager()
@@ -81,23 +78,35 @@ class Controller(common.OpenstackController):
self.manager.delete_user(id)
return {}
- def create(self, req):
+ def create(self, req, body):
self._check_admin(req.environ['nova.context'])
- env = self._deserialize(req.body, req.get_content_type())
- is_admin = env['user'].get('admin') in ('T', 'True', True)
- name = env['user'].get('name')
- access = env['user'].get('access')
- secret = env['user'].get('secret')
+ is_admin = body['user'].get('admin') in ('T', 'True', True)
+ name = body['user'].get('name')
+ access = body['user'].get('access')
+ secret = body['user'].get('secret')
user = self.manager.create_user(name, access, secret, is_admin)
return dict(user=_translate_keys(user))
- def update(self, req, id):
+ def update(self, req, id, body):
self._check_admin(req.environ['nova.context'])
- env = self._deserialize(req.body, req.get_content_type())
- is_admin = env['user'].get('admin')
+ is_admin = body['user'].get('admin')
if is_admin is not None:
is_admin = is_admin in ('T', 'True', True)
- access = env['user'].get('access')
- secret = env['user'].get('secret')
+ access = body['user'].get('access')
+ secret = body['user'].get('secret')
self.manager.modify_user(id, access, secret, is_admin)
return dict(user=_translate_keys(self.manager.get_user(id)))
+
+
+def create_resource():
+ metadata = {
+ "attributes": {
+ "user": ["id", "name", "access", "secret", "admin"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py
index 3f9d91934..4c682302f 100644
--- a/nova/api/openstack/versions.py
+++ b/nova/api/openstack/versions.py
@@ -18,13 +18,26 @@
import webob
import webob.dec
-from nova import wsgi
import nova.api.openstack.views.versions
+from nova.api.openstack import wsgi
-class Versions(wsgi.Application):
- @webob.dec.wsgify(RequestClass=wsgi.Request)
- def __call__(self, req):
+class Versions(wsgi.Resource):
+ def __init__(self):
+ metadata = {
+ "attributes": {
+ "version": ["status", "id"],
+ "link": ["rel", "href"],
+ }
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ wsgi.Resource.__init__(self, None, serializers=serializers)
+
+ def dispatch(self, request, *args):
"""Respond to a request for all OpenStack API versions."""
version_objs = [
{
@@ -37,24 +50,6 @@ class Versions(wsgi.Application):
},
]
- builder = nova.api.openstack.views.versions.get_view_builder(req)
+ builder = nova.api.openstack.views.versions.get_view_builder(request)
versions = [builder.build(version) for version in version_objs]
- response = dict(versions=versions)
-
- metadata = {
- "application/xml": {
- "attributes": {
- "version": ["status", "id"],
- "link": ["rel", "href"],
- }
- }
- }
-
- content_type = req.best_match_content_type()
- body = wsgi.Serializer(metadata).serialize(response, content_type)
-
- response = webob.Response()
- response.content_type = content_type
- response.body = body
-
- return response
+ return dict(versions=versions)
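
Versions is now just a wsgi.Resource whose dispatch() returns a dict; content negotiation and serialization happen in the Resource machinery. A rough, untested sketch of exercising it with webob, assuming this branch of nova is importable:

    from webob import Request
    from nova.api.openstack import versions

    app = versions.Versions()
    req = Request.blank('/', headers={'Accept': 'application/json'})
    res = req.get_response(app)
    # res.body is the serialized {"versions": [...]} document
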
diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py
index e21c9f2fd..934b4921a 100644
--- a/nova/api/openstack/views/limits.py
+++ b/nova/api/openstack/views/limits.py
@@ -29,9 +29,6 @@ class ViewBuilder(object):
def _build_rate_limit(self, rate_limit):
raise NotImplementedError()
- def _build_absolute_limits(self, absolute_limit):
- raise NotImplementedError()
-
def build(self, rate_limits, absolute_limits):
rate_limits = self._build_rate_limits(rate_limits)
absolute_limits = self._build_absolute_limits(absolute_limits)
@@ -67,12 +64,6 @@ class ViewBuilder(object):
limits[name] = value
return limits
- def _build_rate_limits(self, rate_limits):
- raise NotImplementedError()
-
- def _build_rate_limit(self, rate_limit):
- raise NotImplementedError()
-
class ViewBuilderV10(ViewBuilder):
"""Openstack API v1.0 limits view builder."""
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 0be468edc..245d0e3fa 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -18,6 +18,7 @@
import hashlib
import os
+from nova import exception
from nova.compute import power_state
import nova.compute
import nova.context
@@ -41,12 +42,15 @@ class ViewBuilder(object):
def build(self, inst, is_detail):
"""Return a dict that represenst a server."""
- if is_detail:
- server = self._build_detail(inst)
+ if inst.get('_is_precooked', False):
+ server = dict(server=inst)
else:
- server = self._build_simple(inst)
+ if is_detail:
+ server = self._build_detail(inst)
+ else:
+ server = self._build_simple(inst)
- self._build_extra(server, inst)
+ self._build_extra(server, inst)
return server
@@ -78,6 +82,7 @@ class ViewBuilder(object):
ctxt = nova.context.get_admin_context()
compute_api = nova.compute.API()
+
if compute_api.has_finished_migration(ctxt, inst['id']):
inst_dict['status'] = 'RESIZE-CONFIRM'
@@ -112,8 +117,11 @@ class ViewBuilderV10(ViewBuilder):
"""Model an Openstack API V1.0 server response."""
def _build_image(self, response, inst):
- if 'image_id' in dict(inst):
- response['imageId'] = inst['image_id']
+ if 'image_ref' in dict(inst):
+ image_ref = inst['image_ref']
+ if str(image_ref).startswith('http'):
+ raise exception.ListingImageRefsNotSupported()
+ response['imageId'] = int(image_ref)
def _build_flavor(self, response, inst):
if 'instance_type' in dict(inst):
@@ -130,9 +138,11 @@ class ViewBuilderV11(ViewBuilder):
self.base_url = base_url
def _build_image(self, response, inst):
- if "image_id" in dict(inst):
- image_id = inst.get("image_id")
- response["imageRef"] = self.image_builder.generate_href(image_id)
+ if 'image_ref' in dict(inst):
+ image_href = inst['image_ref']
+ if str(image_href).isdigit():
+ image_href = int(image_href)
+ response['imageRef'] = image_href
def _build_flavor(self, response, inst):
if "instance_type" in dict(inst):
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
new file mode 100644
index 000000000..3f8acf339
--- /dev/null
+++ b/nova/api/openstack/wsgi.py
@@ -0,0 +1,395 @@
+
+import json
+import webob
+from xml.dom import minidom
+from xml.parsers import expat
+
+import faults
+from nova import exception
+from nova import log as logging
+from nova import utils
+from nova import wsgi
+
+
+XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
+XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
+
+LOG = logging.getLogger('nova.api.openstack.wsgi')
+
+
+class Request(webob.Request):
+ """Add some Openstack API-specific logic to the base webob.Request."""
+
+ def best_match_content_type(self):
+ """Determine the requested response content-type.
+
+ Based on the query extension then the Accept header.
+
+ """
+ supported = ('application/json', 'application/xml')
+
+ parts = self.path.rsplit('.', 1)
+ if len(parts) > 1:
+ ctype = 'application/{0}'.format(parts[1])
+ if ctype in supported:
+ return ctype
+
+ bm = self.accept.best_match(supported)
+
+ # default to application/json if we don't find a preference
+ return bm or 'application/json'
+
+ def get_content_type(self):
+ """Determine content type of the request body.
+
+ Does not do any body introspection, only checks header
+
+ """
+ if not "Content-Type" in self.headers:
+ raise exception.InvalidContentType(content_type=None)
+
+ allowed_types = ("application/xml", "application/json")
+ content_type = self.content_type
+
+ if content_type not in allowed_types:
+ raise exception.InvalidContentType(content_type=content_type)
+ else:
+ return content_type
+
+
+class TextDeserializer(object):
+ """Custom request body deserialization based on controller action name."""
+
+ def deserialize(self, datastring, action='default'):
+ """Find local deserialization method and parse request body."""
+ action_method = getattr(self, str(action), self.default)
+ return action_method(datastring)
+
+ def default(self, datastring):
+ """Default deserialization code should live here"""
+ raise NotImplementedError()
+
+
+class JSONDeserializer(TextDeserializer):
+
+ def default(self, datastring):
+ try:
+ return utils.loads(datastring)
+ except ValueError:
+ raise exception.MalformedRequestBody(
+ reason=_("malformed JSON in request body"))
+
+
+class XMLDeserializer(TextDeserializer):
+
+ def __init__(self, metadata=None):
+ """
+ :param metadata: information needed to deserialize xml into
+ a dictionary.
+ """
+ super(XMLDeserializer, self).__init__()
+ self.metadata = metadata or {}
+
+ def default(self, datastring):
+ plurals = set(self.metadata.get('plurals', {}))
+
+ try:
+ node = minidom.parseString(datastring).childNodes[0]
+ return {node.nodeName: self._from_xml_node(node, plurals)}
+ except expat.ExpatError:
+ raise exception.MalformedRequestBody(
+ reason=_("malformed XML in request body"))
+
+ def _from_xml_node(self, node, listnames):
+ """Convert a minidom node to a simple Python type.
+
+ :param listnames: list of XML node names whose subnodes should
+ be considered list items.
+
+ """
+ if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
+ return node.childNodes[0].nodeValue
+ elif node.nodeName in listnames:
+ return [self._from_xml_node(n, listnames) for n in node.childNodes]
+ else:
+ result = dict()
+ for attr in node.attributes.keys():
+ result[attr] = node.attributes[attr].nodeValue
+ for child in node.childNodes:
+ if child.nodeType != node.TEXT_NODE:
+ result[child.nodeName] = self._from_xml_node(child,
+ listnames)
+ return result
+
+
+class RequestDeserializer(object):
+ """Break up a Request object into more useful pieces."""
+
+ def __init__(self, deserializers=None):
+ """
+ :param deserializers: dictionary of content-type-specific deserializers
+
+ """
+ self.deserializers = {
+ 'application/xml': XMLDeserializer(),
+ 'application/json': JSONDeserializer(),
+ }
+
+ self.deserializers.update(deserializers or {})
+
+ def deserialize(self, request):
+ """Extract necessary pieces of the request.
+
+ :param request: Request object
+ :returns tuple of expected controller action name, dictionary of
+ keyword arguments to pass to the controller, the expected
+ content type of the response
+
+ """
+ action_args = self.get_action_args(request.environ)
+ action = action_args.pop('action', None)
+
+ if request.method.lower() in ('post', 'put'):
+ if len(request.body) == 0:
+ action_args['body'] = None
+ else:
+ content_type = request.get_content_type()
+ deserializer = self.get_deserializer(content_type)
+
+ try:
+ body = deserializer.deserialize(request.body, action)
+ action_args['body'] = body
+ except exception.InvalidContentType:
+ action_args['body'] = None
+
+ accept = self.get_expected_content_type(request)
+
+ return (action, action_args, accept)
+
+ def get_deserializer(self, content_type):
+ try:
+ return self.deserializers[content_type]
+ except (KeyError, TypeError):
+ raise exception.InvalidContentType(content_type=content_type)
+
+ def get_expected_content_type(self, request):
+ return request.best_match_content_type()
+
+ def get_action_args(self, request_environment):
+ """Parse dictionary created by routes library."""
+ try:
+ args = request_environment['wsgiorg.routing_args'][1].copy()
+ except Exception:
+ return {}
+
+ try:
+ del args['controller']
+ except KeyError:
+ pass
+
+ try:
+ del args['format']
+ except KeyError:
+ pass
+
+ return args
+
+
+class DictSerializer(object):
+ """Custom response body serialization based on controller action name."""
+
+ def serialize(self, data, action='default'):
+ """Find local serialization method and encode response body."""
+ action_method = getattr(self, str(action), self.default)
+ return action_method(data)
+
+ def default(self, data):
+ """Default serialization code should live here"""
+ raise NotImplementedError()
+
+
+class JSONDictSerializer(DictSerializer):
+
+ def default(self, data):
+ return utils.dumps(data)
+
+
+class XMLDictSerializer(DictSerializer):
+
+ def __init__(self, metadata=None, xmlns=None):
+ """
+ :param metadata: information needed to deserialize xml into
+ a dictionary.
+ :param xmlns: XML namespace to include with serialized xml
+ """
+ super(XMLDictSerializer, self).__init__()
+ self.metadata = metadata or {}
+ self.xmlns = xmlns
+
+ def default(self, data):
+ # We expect data to contain a single key which is the XML root.
+ root_key = data.keys()[0]
+ doc = minidom.Document()
+ node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
+
+ xmlns = node.getAttribute('xmlns')
+ if not xmlns and self.xmlns:
+ node.setAttribute('xmlns', self.xmlns)
+
+ return node.toprettyxml(indent=' ', encoding='utf-8')
+
+ def _to_xml_node(self, doc, metadata, nodename, data):
+ """Recursive method to convert data members to XML nodes."""
+ result = doc.createElement(nodename)
+
+ # Set the xml namespace if one is specified
+ # TODO(justinsb): We could also use prefixes on the keys
+ xmlns = metadata.get('xmlns', None)
+ if xmlns:
+ result.setAttribute('xmlns', xmlns)
+
+ #TODO(bcwaldon): accomplish this without a type-check
+ if type(data) is list:
+ collections = metadata.get('list_collections', {})
+ if nodename in collections:
+ metadata = collections[nodename]
+ for item in data:
+ node = doc.createElement(metadata['item_name'])
+ node.setAttribute(metadata['item_key'], str(item))
+ result.appendChild(node)
+ return result
+ singular = metadata.get('plurals', {}).get(nodename, None)
+ if singular is None:
+ if nodename.endswith('s'):
+ singular = nodename[:-1]
+ else:
+ singular = 'item'
+ for item in data:
+ node = self._to_xml_node(doc, metadata, singular, item)
+ result.appendChild(node)
+ #TODO(bcwaldon): accomplish this without a type-check
+ elif type(data) is dict:
+ collections = metadata.get('dict_collections', {})
+ if nodename in collections:
+ metadata = collections[nodename]
+ for k, v in data.items():
+ node = doc.createElement(metadata['item_name'])
+ node.setAttribute(metadata['item_key'], str(k))
+ text = doc.createTextNode(str(v))
+ node.appendChild(text)
+ result.appendChild(node)
+ return result
+ attrs = metadata.get('attributes', {}).get(nodename, {})
+ for k, v in data.items():
+ if k in attrs:
+ result.setAttribute(k, str(v))
+ else:
+ node = self._to_xml_node(doc, metadata, k, v)
+ result.appendChild(node)
+ else:
+ # Type is atom
+ node = doc.createTextNode(str(data))
+ result.appendChild(node)
+ return result
+
+
+class ResponseSerializer(object):
+ """Encode the necessary pieces into a response object"""
+
+ def __init__(self, serializers=None):
+ """
+ :param serializers: dictionary of content-type-specific serializers
+
+ """
+ self.serializers = {
+ 'application/xml': XMLDictSerializer(),
+ 'application/json': JSONDictSerializer(),
+ }
+ self.serializers.update(serializers or {})
+
+ def serialize(self, response_data, content_type, action='default'):
+ """Serialize a dict into a string and wrap in a wsgi.Request object.
+
+ :param response_data: dict produced by the Controller
+ :param content_type: expected mimetype of serialized response body
+
+ """
+ response = webob.Response()
+ response.headers['Content-Type'] = content_type
+
+ serializer = self.get_serializer(content_type)
+ response.body = serializer.serialize(response_data, action)
+
+ return response
+
+ def get_serializer(self, content_type):
+ try:
+ return self.serializers[content_type]
+ except (KeyError, TypeError):
+ raise exception.InvalidContentType(content_type=content_type)
+
+
+class Resource(wsgi.Application):
+ """WSGI app that handles (de)serialization and controller dispatch.
+
+ WSGI app that reads routing information supplied by RoutesMiddleware
+ and calls the requested action method upon its controller. All
+ controller action methods must accept a 'req' argument, which is the
+ incoming wsgi.Request. If the operation is a PUT or POST, the controller
+ method must also accept a 'body' argument (the deserialized request body).
+ They may raise a webob.exc exception or return a dict, which will be
+ serialized by requested content type.
+
+ """
+ def __init__(self, controller, serializers=None, deserializers=None):
+ """
+        :param controller: object that implements methods created by routes lib
+ :param serializers: dict of content-type specific text serializers
+ :param deserializers: dict of content-type specific text deserializers
+
+ """
+ self.controller = controller
+ self.serializer = ResponseSerializer(serializers)
+ self.deserializer = RequestDeserializer(deserializers)
+
+ @webob.dec.wsgify(RequestClass=Request)
+ def __call__(self, request):
+ """WSGI method that controls (de)serialization and method dispatch."""
+
+ LOG.debug("%(method)s %(url)s" % {"method": request.method,
+ "url": request.url})
+
+ try:
+ action, action_args, accept = self.deserializer.deserialize(
+ request)
+ except exception.InvalidContentType:
+ return webob.exc.HTTPBadRequest(_("Unsupported Content-Type"))
+ except exception.MalformedRequestBody:
+ explanation = _("Malformed request body")
+ return faults.Fault(webob.exc.HTTPBadRequest(
+ explanation=explanation))
+
+ action_result = self.dispatch(request, action, action_args)
+
+ #TODO(bcwaldon): find a more elegant way to pass through non-dict types
+ if type(action_result) is dict:
+ response = self.serializer.serialize(action_result, accept, action)
+ else:
+ response = action_result
+
+ try:
+ msg_dict = dict(url=request.url, status=response.status_int)
+ msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
+ except AttributeError, e:
+ msg_dict = dict(url=request.url, e=e)
+ msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
+
+ LOG.debug(msg)
+
+ return response
+
+ def dispatch(self, request, action, action_args):
+ """Find action-spefic method on controller and call it."""
+
+ controller_method = getattr(self.controller, action)
+ return controller_method(req=request, **action_args)
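
Taken together, Resource replaces the per-controller serialization hooks: routes supplies the action name, RequestDeserializer turns the request body into the 'body' keyword argument, and whatever dict the action returns is serialized to the negotiated content type. A rough standalone sketch, assuming this branch of nova is importable (EchoController and the hand-built routing_args are illustrative, not part of the patch):

    from nova.api.openstack import wsgi

    class EchoController(object):
        def create(self, req, body):
            # Actions receive the deserialized body and return a plain dict.
            return {'echo': body}

    resource = wsgi.Resource(EchoController())

    req = wsgi.Request.blank('/echoes')
    req.method = 'POST'
    req.content_type = 'application/json'
    req.body = '{"message": "hi"}'
    # Normally nova's routes middleware sets this; faked here for the sketch.
    req.environ['wsgiorg.routing_args'] = ((), {'action': 'create'})

    response = req.get_response(resource)
    # response.body is a JSON document like {"echo": {"message": "hi"}}
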
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
index af73d8f6d..8864f825b 100644
--- a/nova/api/openstack/zones.py
+++ b/nova/api/openstack/zones.py
@@ -21,14 +21,17 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-from nova.api.openstack import common
+
+from nova.compute import api as compute
from nova.scheduler import api
+from nova.api.openstack import create_instance_helper as helper
+from nova.api.openstack import common
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
FLAGS = flags.FLAGS
-flags.DEFINE_string('build_plan_encryption_key',
- None,
- '128bit (hex) encryption key for scheduler build plans.')
LOG = logging.getLogger('nova.api.openstack.zones')
@@ -52,12 +55,20 @@ def _scrub_zone(zone):
'deleted', 'deleted_at', 'updated_at'))
-class Controller(common.OpenstackController):
+def check_encryption_key(func):
+ def wrapped(*args, **kwargs):
+ if not FLAGS.build_plan_encryption_key:
+ raise exception.Error(_("--build_plan_encryption_key not set"))
+ return func(*args, **kwargs)
+ return wrapped
+
+
+class Controller(object):
+ """Controller for Zone resources."""
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "zone": ["id", "api_url", "name", "capabilities"]}}}
+ def __init__(self):
+ self.compute_api = compute.API()
+ self.helper = helper.CreateInstanceHelper(self)
def index(self, req):
"""Return all zones in brief"""
@@ -92,36 +103,46 @@ class Controller(common.OpenstackController):
return dict(zone=_scrub_zone(zone))
def delete(self, req, id):
+ """Delete a child zone entry."""
zone_id = int(id)
api.zone_delete(req.environ['nova.context'], zone_id)
return {}
- def create(self, req):
+ def create(self, req, body):
+ """Create a child zone entry."""
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
- zone = api.zone_create(context, env["zone"])
+ zone = api.zone_create(context, body["zone"])
return dict(zone=_scrub_zone(zone))
- def update(self, req, id):
+ def update(self, req, id, body):
+ """Update a child zone entry."""
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
zone_id = int(id)
- zone = api.zone_update(context, zone_id, env["zone"])
+ zone = api.zone_update(context, zone_id, body["zone"])
return dict(zone=_scrub_zone(zone))
- def select(self, req):
+ def boot(self, req, body):
+ """Creates a new server for a given user while being Zone aware.
+
+ Returns a reservation ID (a UUID).
+ """
+ result = None
+ try:
+ extra_values, result = self.helper.create_instance(req, body,
+ self.compute_api.create_all_at_once)
+ except faults.Fault, f:
+ return f
+
+ reservation_id = result
+ return {'reservation_id': reservation_id}
+
+ @check_encryption_key
+ def select(self, req, body):
"""Returns a weighted list of costs to create instances
of desired capabilities."""
ctx = req.environ['nova.context']
- qs = req.environ['QUERY_STRING']
- param_dict = urlparse.parse_qs(qs)
- param_dict.pop("fresh", None)
- # parse_qs returns a dict where the values are lists,
- # since query strings can have multiple values for the
- # same key. We need to convert that to single values.
- for key in param_dict:
- param_dict[key] = param_dict[key][0]
- build_plan = api.select(ctx, specs=param_dict)
+ specs = json.loads(body)
+ build_plan = api.select(ctx, specs=specs)
cooked = self._scrub_build_plan(build_plan)
return {"weights": cooked}
@@ -129,9 +150,6 @@ class Controller(common.OpenstackController):
"""Remove all the confidential data and return a sanitized
version of the build plan. Include an encrypted full version
of the weighting entry so we can get back to it later."""
- if not FLAGS.build_plan_encryption_key:
- raise exception.FlagNotSet(flag='build_plan_encryption_key')
-
encryptor = crypto.encryptor(FLAGS.build_plan_encryption_key)
cooked = []
for entry in build_plan:
@@ -140,3 +158,52 @@ class Controller(common.OpenstackController):
cooked.append(dict(weight=entry['weight'],
blob=cipher_text))
return cooked
+
+ def _image_ref_from_req_data(self, data):
+ return data['server']['imageId']
+
+ def _flavor_id_from_req_data(self, data):
+ return data['server']['flavorId']
+
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ return self.helper._get_server_admin_password_old_style(server)
+
+
+class ControllerV11(Controller):
+ """Controller for 1.1 Zone resources."""
+
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ return self.helper._get_server_admin_password_new_style(server)
+
+ def _image_ref_from_req_data(self, data):
+ return data['server']['imageRef']
+
+ def _flavor_id_from_req_data(self, data):
+ return data['server']['flavorRef']
+
+
+def create_resource(version):
+ controller = {
+ '1.0': Controller,
+ '1.1': ControllerV11,
+ }[version]()
+
+ metadata = {
+ "attributes": {
+ "zone": ["id", "api_url", "name", "capabilities"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
+ metadata=metadata),
+ }
+
+ deserializers = {
+ 'application/xml': helper.ServerXMLDeserializer(),
+ }
+
+ return wsgi.Resource(controller, serializers=serializers,
+ deserializers=deserializers)
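
The v1.0 and v1.1 zone controllers differ only in where they find the image and flavor in the request body (imageId/flavorId versus imageRef/flavorRef) and in how the admin password is extracted; everything else is delegated to CreateInstanceHelper. A small sketch of that split, using made-up request bodies:

    # Hedged illustration of the _image_ref_from_req_data split above;
    # the bodies here are sample data, not real requests.
    body_v10 = {'server': {'name': 'vm1', 'imageId': 1, 'flavorId': 1}}
    body_v11 = {'server': {'name': 'vm1',
                           'imageRef': 'http://glance:9292/images/1',
                           'flavorRef': 'http://nova/flavors/1'}}

    def image_ref_from_body(version, body):
        key = {'1.0': 'imageId', '1.1': 'imageRef'}[version]
        return body['server'][key]

    assert image_ref_from_body('1.0', body_v10) == 1
    assert image_ref_from_body('1.1', body_v11).endswith('/images/1')
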
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 3f8432851..e9532473d 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -24,6 +24,7 @@ other backends by creating another class that exposes the same
public methods.
"""
+import functools
import sys
from nova import exception
@@ -68,6 +69,12 @@ flags.DEFINE_string('ldap_developer',
LOG = logging.getLogger("nova.ldapdriver")
+if FLAGS.memcached_servers:
+ import memcache
+else:
+ from nova import fakememcache as memcache
+
+
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
# creating this now because I'm expecting an auth refactor
@@ -85,6 +92,7 @@ def _clean(attr):
def sanitize(fn):
"""Decorator to sanitize all args"""
+ @functools.wraps(fn)
def _wrapped(self, *args, **kwargs):
args = [_clean(x) for x in args]
kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
@@ -103,29 +111,56 @@ class LdapDriver(object):
isadmin_attribute = 'isNovaAdmin'
project_attribute = 'owner'
project_objectclass = 'groupOfNames'
+ conn = None
+ mc = None
def __init__(self):
"""Imports the LDAP module"""
self.ldap = __import__('ldap')
- self.conn = None
if FLAGS.ldap_schema_version == 1:
LdapDriver.project_pattern = '(objectclass=novaProject)'
LdapDriver.isadmin_attribute = 'isAdmin'
LdapDriver.project_attribute = 'projectManager'
LdapDriver.project_objectclass = 'novaProject'
+ self.__cache = None
+ if LdapDriver.conn is None:
+ LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
+ LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
+ FLAGS.ldap_password)
+ if LdapDriver.mc is None:
+ LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def __enter__(self):
- """Creates the connection to LDAP"""
- self.conn = self.ldap.initialize(FLAGS.ldap_url)
- self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password)
+ # TODO(yorik-sar): Should be per-request cache, not per-driver-request
+ self.__cache = {}
return self
def __exit__(self, exc_type, exc_value, traceback):
- """Destroys the connection to LDAP"""
- self.conn.unbind_s()
+ self.__cache = None
return False
+ def __local_cache(key_fmt): # pylint: disable=E0213
+ """Wrap function to cache it's result in self.__cache.
+ Works only with functions with one fixed argument.
+ """
+ def do_wrap(fn):
+ @functools.wraps(fn)
+ def inner(self, arg, **kwargs):
+ cache_key = key_fmt % (arg,)
+ try:
+ res = self.__cache[cache_key]
+ LOG.debug('Local cache hit for %s by key %s' %
+ (fn.__name__, cache_key))
+ return res
+ except KeyError:
+ res = fn(self, arg, **kwargs)
+ self.__cache[cache_key] = res
+ return res
+ return inner
+ return do_wrap
+
@sanitize
+ @__local_cache('uid_user-%s')
def get_user(self, uid):
"""Retrieve user by id"""
attr = self.__get_ldap_user(uid)
@@ -134,15 +169,31 @@ class LdapDriver(object):
@sanitize
def get_user_from_access_key(self, access):
"""Retrieve user by access key"""
+ cache_key = 'uak_dn_%s' % (access,)
+ user_dn = self.mc.get(cache_key)
+ if user_dn:
+ user = self.__to_user(
+ self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
+ if user:
+ if user['access'] == access:
+ return user
+ else:
+ self.mc.set(cache_key, None)
query = '(accessKey=%s)' % access
dn = FLAGS.ldap_user_subtree
- return self.__to_user(self.__find_object(dn, query))
+ user_obj = self.__find_object(dn, query)
+ user = self.__to_user(user_obj)
+ if user:
+ self.mc.set(cache_key, user_obj['dn'][0])
+ return user
@sanitize
+ @__local_cache('pid_project-%s')
def get_project(self, pid):
"""Retrieve project by id"""
- dn = self.__project_to_dn(pid)
- attr = self.__find_object(dn, LdapDriver.project_pattern)
+ dn = self.__project_to_dn(pid, search=False)
+ attr = self.__find_object(dn, LdapDriver.project_pattern,
+ scope=self.ldap.SCOPE_BASE)
return self.__to_project(attr)
@sanitize
@@ -395,6 +446,7 @@ class LdapDriver(object):
"""Check if project exists"""
return self.get_project(project_id) is not None
+ @__local_cache('uid_attrs-%s')
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
dn = FLAGS.ldap_user_subtree
@@ -426,12 +478,20 @@ class LdapDriver(object):
if scope is None:
# One of the flags is 0!
scope = self.ldap.SCOPE_SUBTREE
+ if query is None:
+ query = "(objectClass=*)"
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# Just return the attributes
- return [attributes for dn, attributes in res]
+ # FIXME(yorik-sar): Whole driver should be refactored to
+ # prevent this hack
+ res1 = []
+ for dn, attrs in res:
+ attrs['dn'] = [dn]
+ res1.append(attrs)
+ return res1
def __find_role_dns(self, tree):
"""Find dns of role objects in given tree"""
@@ -564,6 +624,7 @@ class LdapDriver(object):
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
+ @__local_cache('uid_dn-%s')
def __uid_to_dn(self, uid, search=True):
"""Convert uid to dn"""
# By default return a generated DN
@@ -576,6 +637,7 @@ class LdapDriver(object):
userdn = user[0]
return userdn
+ @__local_cache('pid_dn-%s')
def __project_to_dn(self, pid, search=True):
"""Convert pid to dn"""
# By default return a generated DN
@@ -603,16 +665,18 @@ class LdapDriver(object):
else:
return None
+ @__local_cache('dn_uid-%s')
def __dn_to_uid(self, dn):
"""Convert user dn to uid"""
query = '(objectclass=novaUser)'
- user = self.__find_object(dn, query)
+ user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
return user[FLAGS.ldap_user_id_attribute][0]
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
- def __init__(self): # pylint: disable=W0231
- __import__('nova.auth.fakeldap')
- self.ldap = sys.modules['nova.auth.fakeldap']
+ def __init__(self):
+ import nova.auth.fakeldap
+ sys.modules['ldap'] = nova.auth.fakeldap
+ super(FakeLdapDriver, self).__init__()
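
The __local_cache decorator above memoizes single-argument lookups in a dict that lives only for the span of one `with driver:` block (created in __enter__, dropped in __exit__), while get_user_from_access_key additionally keeps the access-key-to-DN mapping in memcache. A self-contained sketch of the per-request cache shape; the names here are illustrative, not Nova's:

    import functools

    def local_cache(key_fmt):
        def do_wrap(fn):
            @functools.wraps(fn)
            def inner(self, arg):
                key = key_fmt % (arg,)
                if key in self._cache:
                    return self._cache[key]
                res = fn(self, arg)
                self._cache[key] = res
                return res
            return inner
        return do_wrap

    class Driver(object):
        def __enter__(self):
            self._cache = {}          # fresh cache per with-block
            return self

        def __exit__(self, *exc):
            self._cache = None        # drop it when the request ends
            return False

        @local_cache('user-%s')
        def get_user(self, uid):
            return {'id': uid}        # stand-in for an LDAP lookup

    with Driver() as d:
        d.get_user('alice')           # miss: performs the "lookup"
        d.get_user('alice')           # hit: served from the local cache
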
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 07235a2a7..98c7dd263 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
LOG = logging.getLogger('nova.auth.manager')
+if FLAGS.memcached_servers:
+ import memcache
+else:
+ from nova import fakememcache as memcache
+
+
class AuthBase(object):
"""Base class for objects relating to auth
@@ -206,6 +212,7 @@ class AuthManager(object):
"""
_instance = None
+ mc = None
def __new__(cls, *args, **kwargs):
"""Returns the AuthManager singleton"""
@@ -222,13 +229,8 @@ class AuthManager(object):
self.network_manager = utils.import_object(FLAGS.network_manager)
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
-
- if FLAGS.memcached_servers:
- import memcache
- else:
- from nova import fakememcache as memcache
- self.mc = memcache.Client(FLAGS.memcached_servers,
- debug=0)
+ if AuthManager.mc is None:
+ AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
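
Both AuthManager and LdapDriver now keep their memcache client (and, for LDAP, the connection) as class-level attributes, so every later instance reuses the first one instead of reconnecting. A sketch of that shared-client idiom; the class and factory below are illustrative:

    class Manager(object):
        mc = None

        def __init__(self, client_factory):
            if Manager.mc is None:
                Manager.mc = client_factory()   # only the first instance pays

    calls = []
    factory = lambda: calls.append(1) or object()
    Manager(factory)
    Manager(factory)
    assert len(calls) == 1      # second construction reused the cached client
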
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index 8170fcafe..d05c099d7 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -1,4 +1,6 @@
-NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE}))
+NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) ||
+ NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
+NOVA_KEY_DIR=${NOVARC%/*}
export EC2_ACCESS_KEY="%(access)s:%(project)s"
export EC2_SECRET_KEY="%(secret)s"
export EC2_URL="%(ec2)s"
@@ -12,4 +14,5 @@ alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_P
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
export NOVA_API_KEY="%(access)s"
export NOVA_USERNAME="%(user)s"
+export NOVA_PROJECT_ID="%(project)s"
export NOVA_URL="%(os)s"
diff --git a/nova/compute/api.py b/nova/compute/api.py
index de774e807..e2c4cf8d7 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -18,7 +18,6 @@
"""Handles all requests relating to instances (guest vms)."""
-import datetime
import eventlet
import re
import time
@@ -26,6 +25,7 @@ import time
from nova import db
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import network
from nova import quota
@@ -58,9 +58,9 @@ class API(base.Base):
def __init__(self, image_service=None, network_api=None,
volume_api=None, hostname_factory=generate_default_hostname,
**kwargs):
- if not image_service:
- image_service = utils.import_object(FLAGS.image_service)
- self.image_service = image_service
+ self.image_service = image_service or \
+ nova.image.get_default_image_service()
+
if not network_api:
network_api = network.API()
self.network_api = network_api
@@ -128,18 +128,17 @@ class API(base.Base):
LOG.warn(msg)
raise quota.QuotaError(msg, "MetadataLimitExceeded")
- def create(self, context, instance_type,
- image_id, kernel_id=None, ramdisk_id=None,
+ def _check_create_parameters(self, context, instance_type,
+ image_href, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
- injected_files=None,
- admin_password=None):
- """Create the number and type of instances requested.
+ injected_files=None, admin_password=None, zone_blob=None,
+ reservation_id=None):
+ """Verify all the input parameters regardless of the provisioning
+ strategy being performed."""
- Verifies that quota and other arguments are valid.
- """
if not instance_type:
instance_type = instance_types.get_default_instance_type()
@@ -160,11 +159,15 @@ class API(base.Base):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, injected_files)
- image = self.image_service.show(context, image_id)
+ (image_service, image_id) = nova.image.get_image_service(image_href)
+ image = image_service.show(context, image_id)
os_type = None
if 'properties' in image and 'os_type' in image['properties']:
os_type = image['properties']['os_type']
+ vm_mode = None
+ if 'properties' in image and 'vm_mode' in image['properties']:
+ vm_mode = image['properties']['vm_mode']
if kernel_id is None:
kernel_id = image['properties'].get('kernel_id', None)
@@ -180,9 +183,9 @@ class API(base.Base):
logging.debug("Using Kernel=%s, Ramdisk=%s" %
(kernel_id, ramdisk_id))
if kernel_id:
- self.image_service.show(context, kernel_id)
+ image_service.show(context, kernel_id)
if ramdisk_id:
- self.image_service.show(context, ramdisk_id)
+ image_service.show(context, ramdisk_id)
if security_group is None:
security_group = ['default']
@@ -201,9 +204,12 @@ class API(base.Base):
key_pair = db.key_pair_get(context, context.user_id, key_name)
key_data = key_pair['public_key']
+ if reservation_id is None:
+ reservation_id = utils.generate_uid('r')
+
base_options = {
- 'reservation_id': utils.generate_uid('r'),
- 'image_id': image_id,
+ 'reservation_id': reservation_id,
+ 'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'state': 0,
@@ -223,62 +229,151 @@ class API(base.Base):
'locked': False,
'metadata': metadata,
'availability_zone': availability_zone,
- 'os_type': os_type}
+ 'os_type': os_type,
+ 'vm_mode': vm_mode}
+
+ return (num_instances, base_options, security_groups)
+
+ def create_db_entry_for_new_instance(self, context, base_options,
+ security_groups, num=1):
+ """Create an entry in the DB for this new instance,
+ including any related table updates (such as security
+ groups, MAC address, etc). This will called by create()
+ in the majority of situations, but all-at-once style
+ Schedulers may initiate the call."""
+ instance = dict(mac_address=utils.generate_mac(),
+ launch_index=num,
+ **base_options)
+ instance = self.db.instance_create(context, instance)
+ instance_id = instance['id']
+
elevated = context.elevated()
- instances = []
- LOG.debug(_("Going to run %s instances..."), num_instances)
- for num in range(num_instances):
- instance = dict(mac_address=utils.generate_mac(),
- launch_index=num,
- **base_options)
- instance = self.db.instance_create(context, instance)
- instance_id = instance['id']
+ if not security_groups:
+ security_groups = []
+ for security_group_id in security_groups:
+ self.db.instance_add_security_group(elevated,
+ instance_id,
+ security_group_id)
- elevated = context.elevated()
- if not security_groups:
- security_groups = []
- for security_group_id in security_groups:
- self.db.instance_add_security_group(elevated,
- instance_id,
- security_group_id)
-
- # Set sane defaults if not specified
- updates = dict(hostname=self.hostname_factory(instance_id))
- if (not hasattr(instance, 'display_name') or
- instance.display_name is None):
- updates['display_name'] = "Server %s" % instance_id
-
- instance = self.update(context, instance_id, **updates)
- instances.append(instance)
+ # Set sane defaults if not specified
+ updates = dict(hostname=self.hostname_factory(instance_id))
+ if (not hasattr(instance, 'display_name') or
+ instance.display_name is None):
+ updates['display_name'] = "Server %s" % instance_id
- pid = context.project_id
- uid = context.user_id
- LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
- " instance %(instance_id)s") % locals())
-
- # NOTE(sandy): For now we're just going to pass in the
- # instance_type record to the scheduler. In a later phase
- # we'll be ripping this whole for-loop out and deferring the
- # creation of the Instance record. At that point all this will
- # change.
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "run_instance",
- "args": {"topic": FLAGS.compute_topic,
- "instance_id": instance_id,
- "request_spec": {
- 'instance_type': instance_type,
- 'filter':
- 'nova.scheduler.host_filter.'
- 'InstanceTypeFilter'
- },
- "availability_zone": availability_zone,
- "injected_files": injected_files,
- "admin_password": admin_password}})
+ instance = self.update(context, instance_id, **updates)
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
+ return instance
+
+ def _ask_scheduler_to_create_instance(self, context, base_options,
+ instance_type, zone_blob,
+ availability_zone, injected_files,
+ admin_password,
+ instance_id=None, num_instances=1):
+ """Send the run_instance request to the schedulers for processing."""
+ pid = context.project_id
+ uid = context.user_id
+ if instance_id:
+ LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
+ " instance %(instance_id)s (single-shot)") % locals())
+ else:
+ LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
+ " (all-at-once)") % locals())
+
+ filter_class = 'nova.scheduler.host_filter.InstanceTypeFilter'
+ request_spec = {
+ 'instance_properties': base_options,
+ 'instance_type': instance_type,
+ 'filter': filter_class,
+ 'blob': zone_blob,
+ 'num_instances': num_instances,
+ }
+
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "run_instance",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id,
+ "request_spec": request_spec,
+ "availability_zone": availability_zone,
+ "admin_password": admin_password,
+ "injected_files": injected_files}})
+
+ def create_all_at_once(self, context, instance_type,
+ image_href, kernel_id=None, ramdisk_id=None,
+ min_count=1, max_count=1,
+ display_name='', display_description='',
+ key_name=None, key_data=None, security_group='default',
+ availability_zone=None, user_data=None, metadata={},
+ injected_files=None, admin_password=None, zone_blob=None,
+ reservation_id=None):
+ """Provision the instances by passing the whole request to
+ the Scheduler for execution. Returns a Reservation ID
+ related to the creation of all of these instances."""
+ num_instances, base_options, security_groups = \
+ self._check_create_parameters(
+ context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data, metadata,
+ injected_files, admin_password, zone_blob,
+ reservation_id)
+
+ self._ask_scheduler_to_create_instance(context, base_options,
+ instance_type, zone_blob,
+ availability_zone, injected_files,
+ admin_password,
+ num_instances=num_instances)
+
+ return base_options['reservation_id']
+
+ def create(self, context, instance_type,
+ image_href, kernel_id=None, ramdisk_id=None,
+ min_count=1, max_count=1,
+ display_name='', display_description='',
+ key_name=None, key_data=None, security_group='default',
+ availability_zone=None, user_data=None, metadata={},
+ injected_files=None, admin_password=None, zone_blob=None,
+ reservation_id=None):
+ """
+ Provision the instances by sending off a series of single
+ instance requests to the Schedulers. This is fine for trivial
+ Scheduler drivers, but may reduce the effectiveness of the
+ more complicated drivers.
+
+ Returns a list of instance dicts.
+ """
+
+ num_instances, base_options, security_groups = \
+ self._check_create_parameters(
+ context, instance_type,
+ image_href, kernel_id, ramdisk_id,
+ min_count, max_count,
+ display_name, display_description,
+ key_name, key_data, security_group,
+ availability_zone, user_data, metadata,
+ injected_files, admin_password, zone_blob,
+ reservation_id)
+
+ instances = []
+ LOG.debug(_("Going to run %s instances..."), num_instances)
+ for num in range(num_instances):
+ instance = self.create_db_entry_for_new_instance(context,
+ base_options, security_groups, num=num)
+ instances.append(instance)
+ instance_id = instance['id']
+
+ self._ask_scheduler_to_create_instance(context, base_options,
+ instance_type, zone_blob,
+ availability_zone, injected_files,
+ admin_password,
+ instance_id=instance_id)
+
return [dict(x.iteritems()) for x in instances]
def has_finished_migration(self, context, instance_id):
@@ -403,7 +498,7 @@ class API(base.Base):
instance['id'],
state_description='terminating',
state=0,
- terminated_at=datetime.datetime.utcnow())
+ terminated_at=utils.utcnow())
host = instance['host']
if host:
@@ -427,6 +522,24 @@ class API(base.Base):
"""
return self.get(context, instance_id)
+ def get_all_across_zones(self, context, reservation_id):
+ """Get all instances with this reservation_id, across
+ all available Zones (if any).
+ """
+ instances = self.db.instance_get_all_by_reservation(
+ context, reservation_id)
+
+ children = scheduler_api.call_zone_method(context, "list",
+ novaclient_collection_name="servers",
+ reservation_id=reservation_id)
+
+ for zone, servers in children:
+ for server in servers:
+ # Results are ready to send to user. No need to scrub.
+ server._info['_is_precooked'] = True
+ instances.append(server._info)
+ return instances
+
def get_all(self, context, project_id=None, reservation_id=None,
fixed_ip=None):
"""Get all instances filtered by one of the given parameters.
@@ -435,8 +548,7 @@ class API(base.Base):
all instances in the system.
"""
if reservation_id is not None:
- return self.db.instance_get_all_by_reservation(
- context, reservation_id)
+ return self.get_all_across_zones(context, reservation_id)
if fixed_ip is not None:
return self.db.fixed_ip_get_instance(context, fixed_ip)
@@ -513,9 +625,10 @@ class API(base.Base):
:returns: A dict containing image metadata
"""
properties = {'instance_id': str(instance_id),
- 'user_id': str(context.user_id)}
+ 'user_id': str(context.user_id),
+ 'image_state': 'creating'}
sent_meta = {'name': name, 'is_public': False,
- 'properties': properties}
+ 'status': 'creating', 'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)
params = {'image_id': recv_meta['id']}
self._cast_compute_message('snapshot_instance', context, instance_id,
@@ -526,8 +639,8 @@ class API(base.Base):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
- def rebuild(self, context, instance_id, image_id, name=None, metadata=None,
- files_to_inject=None):
+ def rebuild(self, context, instance_id, image_href, name=None,
+ metadata=None, files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
instance = db.api.instance_get(context, instance_id)
@@ -547,7 +660,7 @@ class API(base.Base):
self.db.instance_update(context, instance_id, values)
rebuild_params = {
- "image_id": image_id,
+ "image_ref": image_href,
"injected_files": files_to_inject,
}
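
create() and create_all_at_once() now share the same validation (_check_create_parameters) and the same scheduler cast; they differ only in whether the instance rows are written up front (one cast per instance_id) or deferred to an all-at-once scheduler (one cast carrying num_instances and no instance_id). For reference, a sketch of the message that ends up on the scheduler topic, with placeholder values:

    # Illustrative only: the shape of the message cast by
    # _ask_scheduler_to_create_instance (values are placeholders).
    request_spec = {
        'instance_properties': {'image_ref': '42', 'reservation_id': 'r-abc123'},
        'instance_type': {'name': 'm1.small'},
        'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
        'blob': None,              # zone_blob, when routing via a child zone
        'num_instances': 1,
    }
    message = {'method': 'run_instance',
               'args': {'topic': 'compute',
                        'instance_id': None,     # None on the all-at-once path
                        'request_spec': request_spec,
                        'availability_zone': None,
                        'admin_password': None,
                        'injected_files': None}}
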
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 1275a6fdd..1d246e445 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -114,7 +114,7 @@ def get_instance_type(id):
ctxt = context.get_admin_context()
return db.instance_type_get_by_id(ctxt, id)
except exception.DBError:
- raise exception.ApiError(_("Unknown instance type: %s") % name)
+ raise exception.ApiError(_("Unknown instance type: %s") % id)
def get_instance_type_by_name(name):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 3897b3a9e..245958de7 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -35,7 +35,6 @@ terminating it.
"""
-import datetime
import os
import socket
import sys
@@ -159,12 +158,12 @@ class ComputeManager(manager.SchedulerDependentManager):
def _update_launched_at(self, context, instance_id, launched_at=None):
"""Update the launched_at parameter of the given instance."""
- data = {'launched_at': launched_at or datetime.datetime.utcnow()}
+ data = {'launched_at': launched_at or utils.utcnow()}
self.db.instance_update(context, instance_id, data)
- def _update_image_id(self, context, instance_id, image_id):
+ def _update_image_ref(self, context, instance_id, image_ref):
"""Update the image_id for the given instance."""
- data = {'image_id': image_id}
+ data = {'image_ref': image_ref}
self.db.instance_update(context, instance_id, data)
def get_console_topic(self, context, **kwargs):
@@ -235,7 +234,7 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state.NOSTATE,
'networking')
- is_vpn = instance_ref['image_id'] == str(FLAGS.vpn_image_id)
+ is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id)
# NOTE(vish): This could be a cast because we don't do anything
# with the address currently, but I'm leaving it as
# a call to ensure that network setup completes. We
@@ -339,7 +338,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: `nova.RequestContext` object
:param instance_id: Instance identifier (integer)
- :param image_id: Image identifier (integer)
+ :param image_ref: Image identifier (href or integer)
"""
context = context.elevated()
@@ -349,11 +348,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id, power_state.BUILDING)
self.driver.destroy(instance_ref)
- instance_ref.image_id = kwargs.get('image_id')
+ image_ref = kwargs.get('image_ref')
+ instance_ref.image_ref = image_ref
instance_ref.injected_files = kwargs.get('injected_files', [])
self.driver.spawn(instance_ref)
- self._update_image_id(context, instance_id, image_id)
+ self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 3bb54a382..9d8e2a25d 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -36,6 +36,7 @@ from twisted.application import service
from nova import flags
from nova import log as logging
+from nova import utils
from nova.virt import connection as virt_connection
@@ -86,7 +87,7 @@ RRD_VALUES = {
]}
-utcnow = datetime.datetime.utcnow
+utcnow = utils.utcnow
LOG = logging.getLogger('nova.compute.monitor')
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
index cc8b0cdf5..fa805e019 100644
--- a/nova/console/vmrc.py
+++ b/nova/console/vmrc.py
@@ -119,7 +119,7 @@ class VMRCSessionConsole(VMRCConsole):
"""
vms = vim_session._call_method(vim_util, 'get_objects',
'VirtualMachine', ['name'])
- vm_ref = NoneV
+ vm_ref = None
for vm in vms:
if vm.propSet[0].val == instance_name:
vm_ref = vm.obj
diff --git a/nova/context.py b/nova/context.py
index c113f7ea7..99085ed75 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -18,7 +18,6 @@
"""RequestContext: context for requests that persist through all of nova."""
-import datetime
import random
from nova import exception
diff --git a/nova/crypto.py b/nova/crypto.py
index bdc32482a..8d535f426 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -176,7 +176,8 @@ def revoke_certs_by_project(project_id):
def revoke_certs_by_user_and_project(user_id, project_id):
"""Revoke certs for user in project."""
admin = context.get_admin_context()
- for cert in db.certificate_get_all_by_user(admin, user_id, project_id):
+ for cert in db.certificate_get_all_by_user_and_project(admin,
+ user_id, project_id):
revoke_cert(cert['project_id'], cert['file_name'])
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e586bbc64..478837f7f 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -19,7 +19,6 @@
Implementation of SQLAlchemy backend.
"""
-import datetime
import warnings
from nova import db
@@ -61,9 +60,7 @@ def is_user_context(context):
def authorize_project_context(context, project_id):
- """Ensures that the request context has permission to access the
- given project.
- """
+ """Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project:
raise exception.NotAuthorized()
@@ -72,9 +69,7 @@ def authorize_project_context(context, project_id):
def authorize_user_context(context, user_id):
- """Ensures that the request context has permission to access the
- given user.
- """
+ """Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user:
raise exception.NotAuthorized()
@@ -90,9 +85,12 @@ def can_read_deleted(context):
def require_admin_context(f):
- """Decorator used to indicate that the method requires an
- administrator context.
+ """Decorator to require admin request context.
+
+ The first argument to the wrapped function must be the context.
+
"""
+
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
@@ -101,12 +99,19 @@ def require_admin_context(f):
def require_context(f):
- """Decorator used to indicate that the method requires either
- an administrator or normal user context.
+ """Decorator to require *any* user or admin context.
+
+ This does no authorization for user or project access matching; see
+ :py:func:`authorize_project_context` and
+ :py:func:`authorize_user_context`.
+
+ The first argument to the wrapped function must be the context.
+
"""
+
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
- raise exception.AdminRequired()
+ raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
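
A minimal illustration of the decorator contract spelled out in the docstrings above: the wrapped function must take the request context as its first positional argument. The context class and check below are stand-ins, not Nova's:

    class FakeContext(object):
        def __init__(self, is_admin):
            self.is_admin = is_admin

    def require_admin(f):
        def wrapper(*args, **kwargs):
            if not args[0].is_admin:
                raise Exception('admin context required')
            return f(*args, **kwargs)
        return wrapper

    @require_admin
    def service_get_all(context):
        return []

    service_get_all(FakeContext(is_admin=True))      # passes the check
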
@@ -674,7 +679,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
filter_by(allocated=0).\
update({'instance_id': None,
'leased': 0,
- 'updated_at': datetime.datetime.utcnow()},
+ 'updated_at': utils.utcnow()},
synchronize_session='fetch')
return result
@@ -738,7 +743,7 @@ def fixed_ip_get_all_by_instance(context, instance_id):
filter_by(instance_id=instance_id).\
filter_by(deleted=False)
if not rv:
- raise exception.NoFloatingIpsFoundForInstance(instance_id=instance_id)
+ raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
return rv
@@ -820,17 +825,17 @@ def instance_destroy(context, instance_id):
session.query(models.Instance).\
filter_by(id=instance_id).\
update({'deleted': True,
- 'deleted_at': datetime.datetime.utcnow(),
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
update({'deleted': True,
- 'deleted_at': datetime.datetime.utcnow(),
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
update({'deleted': True,
- 'deleted_at': datetime.datetime.utcnow(),
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -902,6 +907,7 @@ def instance_get_all_by_host(context, host):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(host=host).\
filter_by(deleted=can_read_deleted(context)).\
@@ -917,6 +923,7 @@ def instance_get_all_by_project(context, project_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
filter_by(deleted=can_read_deleted(context)).\
@@ -932,6 +939,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(reservation_id=reservation_id).\
filter_by(deleted=can_read_deleted(context)).\
@@ -941,6 +949,7 @@ def instance_get_all_by_reservation(context, reservation_id):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
filter_by(reservation_id=reservation_id).\
@@ -954,9 +963,11 @@ def instance_get_project_vpn(context, project_id):
return session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
- filter_by(image_id=str(FLAGS.vpn_image_id)).\
+ filter_by(image_ref=str(FLAGS.vpn_image_id)).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -1122,8 +1133,8 @@ def key_pair_destroy_all_by_user(context, user_id):
with session.begin():
session.query(models.KeyPair).\
filter_by(user_id=user_id).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -1654,8 +1665,8 @@ def volume_destroy(context, volume_id):
with session.begin():
session.query(models.Volume).\
filter_by(id=volume_id).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.ExportDevice).\
filter_by(volume_id=volume_id).\
@@ -1812,8 +1823,8 @@ def snapshot_destroy(context, snapshot_id):
with session.begin():
session.query(models.Snapshot).\
filter_by(id=snapshot_id).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -1967,18 +1978,18 @@ def security_group_destroy(context, security_group_id):
with session.begin():
session.query(models.SecurityGroup).\
filter_by(id=security_group_id).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -1988,12 +1999,12 @@ def security_group_destroy_all(context, session=None):
session = get_session()
with session.begin():
session.query(models.SecurityGroup).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupIngressRule).\
- update({'deleted': 1,
- 'deleted_at': datetime.datetime.utcnow(),
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2642,7 +2653,7 @@ def instance_metadata_delete(context, instance_id, key):
filter_by(key=key).\
filter_by(deleted=False).\
update({'deleted': True,
- 'deleted_at': datetime.datetime.utcnow(),
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2653,7 +2664,7 @@ def instance_metadata_delete_all(context, instance_id):
filter_by(instance_id=instance_id).\
filter_by(deleted=False).\
update({'deleted': True,
- 'deleted_at': datetime.datetime.utcnow(),
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2688,7 +2699,7 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": value,
"instance_id": instance_id,
- "deleted": 0})
+ "deleted": False})
meta_ref.save(session=session)
return metadata
@@ -2719,7 +2730,7 @@ def instance_type_extra_specs_delete(context, instance_type_id, key):
filter_by(key=key).\
filter_by(deleted=False).\
update({'deleted': True,
- 'deleted_at': datetime.datetime.utcnow(),
+ 'deleted_at': utils.utcnow(),
'updated_at': literal_column('updated_at')})
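
The soft-delete updates above switch from datetime.datetime.utcnow to utils.utcnow so there is a single indirection point that tests can freeze. A self-contained sketch of why that helps; Nova's own helper may differ in detail:

    import datetime

    _override = None

    def utcnow():
        # Return the overridden "now" if a test has set one.
        return _override or datetime.datetime.utcnow()

    def set_time_override(when):
        global _override
        _override = when

    set_time_override(datetime.datetime(2011, 6, 16, 12, 0, 0))
    assert utcnow() == datetime.datetime(2011, 6, 16, 12, 0, 0)
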
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
index a2d8192ca..a4fe3e482 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
@@ -17,7 +17,7 @@
from sqlalchemy import Boolean, Column, DateTime, Integer
from sqlalchemy import MetaData, String, Table
-import datetime
+from nova import utils
meta = MetaData()
@@ -35,9 +35,9 @@ def old_style_quotas_table(name):
return Table(name, meta,
Column('id', Integer(), primary_key=True),
Column('created_at', DateTime(),
- default=datetime.datetime.utcnow),
+ default=utils.utcnow),
Column('updated_at', DateTime(),
- onupdate=datetime.datetime.utcnow),
+ onupdate=utils.utcnow),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), default=False),
Column('project_id',
@@ -57,9 +57,9 @@ def new_style_quotas_table(name):
return Table(name, meta,
Column('id', Integer(), primary_key=True),
Column('created_at', DateTime(),
- default=datetime.datetime.utcnow),
+ default=utils.utcnow),
Column('updated_at', DateTime(),
- onupdate=datetime.datetime.utcnow),
+ onupdate=utils.utcnow),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), default=False),
Column('project_id',
@@ -160,7 +160,7 @@ def convert_backward(migrate_engine, old_quotas, new_quotas):
'project_id': quota.project_id,
'created_at': quota.created_at,
'updated_at': quota.updated_at,
- quota.resource: quota.hard_limit
+ quota.resource: quota.hard_limit,
}
else:
quotas[quota.project_id]['created_at'] = earliest(
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
index a169afb40..73c76f666 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
@@ -14,23 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Column, Integer, MetaData, String, Table
-#from nova import log as logging
+from sqlalchemy import MetaData, Table
meta = MetaData()
-c_manageent = Column('server_manageent_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
-
-c_management = Column('server_management_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
-
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
@@ -40,11 +27,8 @@ def upgrade(migrate_engine):
tokens = Table('auth_tokens', meta, autoload=True,
autoload_with=migrate_engine)
- tokens.create_column(c_management)
- migrate_engine.execute(tokens.update()
- .values(server_management_url=tokens.c.server_manageent_url))
-
- tokens.c.server_manageent_url.drop()
+ c_manageent = tokens.c.server_manageent_url
+ c_manageent.alter(name='server_management_url')
def downgrade(migrate_engine):
@@ -53,8 +37,5 @@ def downgrade(migrate_engine):
tokens = Table('auth_tokens', meta, autoload=True,
autoload_with=migrate_engine)
- tokens.create_column(c_manageent)
- migrate_engine.execute(tokens.update()
- .values(server_manageent_url=tokens.c.server_management_url))
-
- tokens.c.server_management_url.drop()
+ c_management = tokens.c.server_management_url
+ c_management.alter(name='server_manageent_url')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py
new file mode 100644
index 000000000..73a5e8477
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ image_id_column = instances.c.image_id
+ image_id_column.alter(name='image_ref')
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ image_ref_column = instances.c.image_ref
+ image_ref_column.alter(name='image_id')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py b/nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py
new file mode 100644
index 000000000..6e590479f
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/022_set_engine_mysql_innodb.py
@@ -0,0 +1,65 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import MetaData, Table
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ if migrate_engine.name == "mysql":
+ migrate_engine.execute("ALTER TABLE auth_tokens Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE certificates Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE compute_nodes Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE console_pools Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE consoles Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE export_devices Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE fixed_ips Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE floating_ips Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE instance_actions Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE instance_metadata Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE instance_types Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE instances Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE iscsi_targets Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE key_pairs Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE migrate_version Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE migrations Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE networks Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE projects Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE quotas Engine=InnoDB")
+ migrate_engine.execute(
+ "ALTER TABLE security_group_instance_association Engine=InnoDB")
+ migrate_engine.execute(
+ "ALTER TABLE security_group_rules Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE security_groups Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE services Engine=InnoDB")
+ migrate_engine.execute(
+ "ALTER TABLE user_project_association Engine=InnoDB")
+ migrate_engine.execute(
+ "ALTER TABLE user_project_role_association Engine=InnoDB")
+ migrate_engine.execute(
+ "ALTER TABLE user_role_association Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE users Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE volumes Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE zones Engine=InnoDB")
+ migrate_engine.execute("ALTER TABLE snapshots Engine=InnoDB")
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py
new file mode 100644
index 000000000..0c587f569
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+meta = MetaData()
+
+instances_vm_mode = Column('vm_mode',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ instances.create_column(instances_vm_mode)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ instances.drop_column('vm_mode')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/021_add_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py
index f26ad6d2c..f26ad6d2c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/021_add_instance_type_extra_specs.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/024_add_instance_type_extra_specs.py
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index d4c217450..52de9298a 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -19,8 +19,6 @@
SQLAlchemy models for nova data.
"""
-import datetime
-
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
from sqlalchemy import ForeignKey, DateTime, Boolean, Text
@@ -33,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session
from nova import auth
from nova import exception
from nova import flags
+from nova import utils
FLAGS = flags.FLAGS
@@ -43,10 +42,11 @@ class NovaBase(object):
"""Base class for Nova Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
__table_initialized__ = False
- created_at = Column(DateTime, default=datetime.datetime.utcnow)
- updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow)
+ created_at = Column(DateTime, default=utils.utcnow)
+ updated_at = Column(DateTime, onupdate=utils.utcnow)
deleted_at = Column(DateTime)
deleted = Column(Boolean, default=False)
+ metadata = None
def save(self, session=None):
"""Save this object."""
@@ -64,7 +64,7 @@ class NovaBase(object):
def delete(self, session=None):
"""Delete this object."""
self.deleted = True
- self.deleted_at = datetime.datetime.utcnow()
+ self.deleted_at = utils.utcnow()
self.save(session=session)
def __setitem__(self, key, value):
@@ -184,11 +184,11 @@ class Instance(BASE, NovaBase):
def project(self):
return auth.manager.AuthManager().get_project(self.project_id)
- image_id = Column(String(255))
+ image_ref = Column(String(255))
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
-# image_id = Column(Integer, ForeignKey('images.id'), nullable=True)
+# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True)
# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id))
@@ -232,6 +232,7 @@ class Instance(BASE, NovaBase):
locked = Column(Boolean)
os_type = Column(String(255))
+ vm_mode = Column(String(255))
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
diff --git a/nova/exception.py b/nova/exception.py
index 394004fe6..c08e47efe 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -65,7 +65,7 @@ class BuildInProgress(Error):
class DBError(Error):
"""Wraps an implementation specific exception."""
- def __init__(self, inner_exception):
+ def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
super(DBError, self).__init__(str(inner_exception))
@@ -122,7 +122,7 @@ class NotAuthorized(NovaException):
message = _("Not authorized.")
def __init__(self, *args, **kwargs):
- super(NotFound, self).__init__(**kwargs)
+ super(NotAuthorized, self).__init__(**kwargs)
class AdminRequired(NotAuthorized):
@@ -291,6 +291,15 @@ class DiskNotFound(NotFound):
message = _("No disk at %(location)s")
+class InvalidImageRef(Invalid):
+ message = _("Invalid image href %(image_href)s.")
+
+
+class ListingImageRefsNotSupported(Invalid):
+ message = _("Some images have been stored via hrefs."
+ + " This version of the api does not support displaying image hrefs.")
+
+
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
@@ -367,6 +376,10 @@ class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
message = _("Zero floating ips defined for instance %(instance_id)s.")
+class NoMoreFloatingIps(NotFound):
+ message = _("Zero floating ips available.")
+
+
class KeypairNotFound(NotFound):
message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
@@ -477,6 +490,15 @@ class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
+class SchedulerCostFunctionNotFound(NotFound):
+ message = _("Scheduler cost function %(cost_fn_str)s could"
+ " not be found.")
+
+
+class SchedulerWeightFlagNotFound(NotFound):
+ message = _("Scheduler weight flag not found: %(flag_name)s")
+
+
class InstanceMetadataNotFound(NotFound):
message = _("Instance %(instance_id)s has no metadata with "
"key %(metadata_key)s.")
@@ -568,3 +590,7 @@ class InstanceExists(Duplicate):
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"
+
+
+class MalformedRequestBody(NovaException):
+ message = _("Malformed message body: %(reason)s")
diff --git a/nova/flags.py b/nova/flags.py
index 9eaac5596..acfcf8d68 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -270,8 +270,10 @@ DEFINE_list('region_list',
DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
-DEFINE_integer('glance_port', 9292, 'glance port')
-DEFINE_string('glance_host', '$my_ip', 'glance host')
+# NOTE(sirp): my_ip interpolation doesn't work within nested structures
+DEFINE_list('glance_api_servers',
+ ['127.0.0.1:9292'],
+ 'list of glance api servers available to nova (host:port)')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)')
@@ -296,6 +298,7 @@ DEFINE_bool('fake_network', False,
'should we use fake network devices and addresses')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
+DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
@@ -380,3 +383,5 @@ DEFINE_string('zone_name', 'nova', 'name of this zone')
DEFINE_list('zone_capabilities',
['hypervisor=xenserver;kvm', 'os=linux;windows'],
'Key/Multi-value list representng capabilities of this zone')
+DEFINE_string('build_plan_encryption_key', None,
+ '128bit (hex) encryption key for scheduler build plans.')
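
The single glance_host/glance_port pair becomes a list of host:port endpoints, and nova/image/glance.py (per the diffstat) gains a pick_glance_api_server() helper that chooses one of them. A hedged sketch of consuming such a flag value; the random pick below is an assumption, not necessarily how Nova balances across servers:

    import random

    glance_api_servers = ['127.0.0.1:9292', '10.0.0.2:9292']

    def pick_server(servers):
        # Parse one "host:port" entry into its components.
        host, port = random.choice(servers).split(':')
        return host, int(port)

    host, port = pick_server(glance_api_servers)
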
diff --git a/nova/image/__init__.py b/nova/image/__init__.py
index e69de29bb..a27d649d4 100644
--- a/nova/image/__init__.py
+++ b/nova/image/__init__.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from urlparse import urlparse
+
+import nova
+from nova import exception
+from nova import utils
+from nova import flags
+from nova.image import glance as glance_image_service
+
+FLAGS = flags.FLAGS
+
+
+GlanceClient = utils.import_class('glance.client.Client')
+
+
+def _parse_image_ref(image_href):
+ """Parse an image href into composite parts.
+
+ :param image_href: href of an image
+ :returns: a tuple of the form (image_id, host, port)
+
+ """
+ o = urlparse(image_href)
+ port = o.port or 80
+ host = o.netloc.split(':', 1)[0]
+ image_id = int(o.path.split('/')[-1])
+ return (image_id, host, port)
+
+
+def get_default_image_service():
+ ImageService = utils.import_class(FLAGS.image_service)
+ return ImageService()
+
+
+# FIXME(sirp): perhaps this should be moved to nova/images/glance so that we
+# keep Glance specific code together for the most part
+def get_glance_client(image_href):
+ """Get the correct glance client and id for the given image_href.
+
+ The image_href param can be an href of the form
+ http://myglanceserver:9292/images/42, or just an int such as 42. If the
+ image_href is an int, then flags are used to create the default
+ glance client.
+
+ :param image_href: image ref/id for an image
+ :returns: a tuple of the form (glance_client, image_id)
+
+ """
+ image_href = image_href or 0
+ if str(image_href).isdigit():
+ glance_host, glance_port = \
+ glance_image_service.pick_glance_api_server()
+ glance_client = GlanceClient(glance_host, glance_port)
+ return (glance_client, int(image_href))
+
+ try:
+ (image_id, host, port) = _parse_image_ref(image_href)
+    except Exception:
+ raise exception.InvalidImageRef(image_href=image_href)
+ glance_client = GlanceClient(host, port)
+ return (glance_client, image_id)
+
+
+def get_image_service(image_href):
+ """Get the proper image_service and id for the given image_href.
+
+ The image_href param can be an href of the form
+ http://myglanceserver:9292/images/42, or just an int such as 42. If the
+ image_href is an int, then the default image service is returned.
+
+ :param image_href: image ref/id for an image
+ :returns: a tuple of the form (image_service, image_id)
+
+ """
+ image_href = image_href or 0
+ if str(image_href).isdigit():
+ return (get_default_image_service(), int(image_href))
+
+ (glance_client, image_id) = get_glance_client(image_href)
+ image_service = nova.image.glance.GlanceImageService(glance_client)
+ return (image_service, image_id)
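
Both get_glance_client() and get_image_service() accept either a bare integer id or a full image href; _parse_image_ref() does the href splitting. A quick standalone illustration of that parsing, reusing the example hostname from the docstrings above:

from urlparse import urlparse  # Python 2 stdlib, as in the module above


def parse_image_ref(image_href):
    # Mirrors _parse_image_ref: returns (image_id, host, port).
    o = urlparse(image_href)
    port = o.port or 80
    host = o.netloc.split(':', 1)[0]
    image_id = int(o.path.split('/')[-1])
    return (image_id, host, port)


print(parse_image_ref('http://myglanceserver:9292/images/42'))
# -> (42, 'myglanceserver', 9292)
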
diff --git a/nova/image/fake.py b/nova/image/fake.py
index 8e84c8597..70a5f0e22 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -19,6 +19,7 @@
import copy
import datetime
+import random
from nova import exception
from nova import flags
@@ -32,7 +33,7 @@ LOG = logging.getLogger('nova.image.fake')
FLAGS = flags.FLAGS
-class FakeImageService(service.BaseImageService):
+class _FakeImageService(service.BaseImageService):
"""Mock (fake) image service for unit testing."""
def __init__(self):
@@ -40,7 +41,48 @@ class FakeImageService(service.BaseImageService):
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
- image = {'id': '123456',
+ image1 = {'id': '123456',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'status': 'active',
+ 'container_format': 'ami',
+ 'disk_format': 'raw',
+ 'properties': {'kernel_id': FLAGS.null_kernel,
+ 'ramdisk_id': FLAGS.null_kernel,
+ 'architecture': 'x86_64'}}
+
+ image2 = {'id': 'fake',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'status': 'active',
+ 'container_format': 'ami',
+ 'disk_format': 'raw',
+ 'properties': {'kernel_id': FLAGS.null_kernel,
+ 'ramdisk_id': FLAGS.null_kernel}}
+
+ image3 = {'id': '2',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'status': 'active',
+ 'container_format': 'ami',
+ 'disk_format': 'raw',
+ 'properties': {'kernel_id': FLAGS.null_kernel,
+ 'ramdisk_id': FLAGS.null_kernel}}
+
+ image4 = {'id': '1',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'status': 'active',
+ 'container_format': 'ami',
+ 'disk_format': 'raw',
+ 'properties': {'kernel_id': FLAGS.null_kernel,
+ 'ramdisk_id': FLAGS.null_kernel}}
+
+ image5 = {'id': '3',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
@@ -49,14 +91,19 @@ class FakeImageService(service.BaseImageService):
'disk_format': 'raw',
'properties': {'kernel_id': FLAGS.null_kernel,
'ramdisk_id': FLAGS.null_kernel}}
- self.create(None, image)
- super(FakeImageService, self).__init__()
- def index(self, context, filters=None):
+ self.create(None, image1)
+ self.create(None, image2)
+ self.create(None, image3)
+ self.create(None, image4)
+ self.create(None, image5)
+ super(_FakeImageService, self).__init__()
+
+ def index(self, context, filters=None, marker=None, limit=None):
"""Returns list of images."""
return copy.deepcopy(self.images.values())
- def detail(self, context, filters=None):
+ def detail(self, context, filters=None, marker=None, limit=None):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
@@ -66,36 +113,41 @@ class FakeImageService(service.BaseImageService):
Returns a dict containing image data for the given opaque image id.
"""
- image_id = int(image_id)
- image = self.images.get(image_id)
+ image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
raise exception.ImageNotFound(image_id=image_id)
- def create(self, context, data):
+ def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
:raises: Duplicate if the image already exist.
"""
- image_id = int(data['id'])
+ try:
+ image_id = metadata['id']
+ except KeyError:
+ image_id = random.randint(0, 2 ** 31 - 1)
+ image_id = str(image_id)
+
if self.images.get(image_id):
raise exception.Duplicate()
- self.images[image_id] = copy.deepcopy(data)
+ metadata['id'] = image_id
+ self.images[image_id] = copy.deepcopy(metadata)
+ return self.images[image_id]
- def update(self, context, image_id, data):
+ def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data.
:raises: ImageNotFound if the image does not exist.
"""
- image_id = int(image_id)
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
- self.images[image_id] = copy.deepcopy(data)
+ self.images[image_id] = copy.deepcopy(metadata)
def delete(self, context, image_id):
"""Delete the given image.
@@ -103,7 +155,6 @@ class FakeImageService(service.BaseImageService):
:raises: ImageNotFound if the image does not exist.
"""
- image_id = int(image_id)
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
@@ -111,3 +162,9 @@ class FakeImageService(service.BaseImageService):
def delete_all(self):
"""Clears out all images."""
self.images.clear()
+
+_fakeImageService = _FakeImageService()
+
+
+def FakeImageService():
+ return _fakeImageService
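
The rename to _FakeImageService plus the FakeImageService() factory means every caller now shares one module-level instance, so images registered through one stub are visible to every other consumer in the same test run. The pattern in isolation (stand-in names, not the nova classes themselves):

class _FakeService(object):
    """Stand-in class illustrating the shared-singleton pattern above."""
    def __init__(self):
        self.images = {}


_fake_service = _FakeService()


def FakeService():
    # The factory keeps constructor-style call sites working while always
    # handing back the same instance.
    return _fake_service


assert FakeService() is FakeService()
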
diff --git a/nova/image/glance.py b/nova/image/glance.py
index dec797619..6e058ab2f 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -20,6 +20,7 @@
from __future__ import absolute_import
import datetime
+import random
from glance.common import exception as glance_exception
@@ -39,6 +40,21 @@ FLAGS = flags.FLAGS
GlanceClient = utils.import_class('glance.client.Client')
+def pick_glance_api_server():
+    """Return which Glance API server to use for the request.
+
+ This method provides a very primitive form of load-balancing suitable for
+ testing and sandbox environments. In production, it would be better to use
+ one IP and route that to a real load-balancer.
+
+ Returns (host, port)
+ """
+ host_port = random.choice(FLAGS.glance_api_servers)
+ host, port_str = host_port.split(':')
+ port = int(port_str)
+ return host, port
+
+
class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
@@ -51,30 +67,43 @@ class GlanceImageService(service.BaseImageService):
GLANCE_ONLY_ATTRS
def __init__(self, client=None):
- # FIXME(sirp): can we avoid dependency-injection here by using
- # stubbing out a fake?
- if client is None:
- self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
- else:
- self.client = client
-
- def index(self, context, filters=None):
+ self._client = client
+
+ def _get_client(self):
+ # NOTE(sirp): we want to load balance each request across glance
+ # servers. Since GlanceImageService is a long-lived object, `client`
+ # is made to choose a new server each time via this property.
+ if self._client is not None:
+ return self._client
+ glance_host, glance_port = pick_glance_api_server()
+ return GlanceClient(glance_host, glance_port)
+
+ def _set_client(self, client):
+ self._client = client
+
+ client = property(_get_client, _set_client)
+
+ def index(self, context, filters=None, marker=None, limit=None):
"""Calls out to Glance for a list of images available."""
# NOTE(sirp): We need to use `get_images_detailed` and not
# `get_images` here because we need `is_public` and `properties`
# included so we can filter by user
filtered = []
- image_metas = self.client.get_images_detailed(filters=filters)
+ image_metas = self.client.get_images_detailed(filters=filters,
+ marker=marker,
+ limit=limit)
for image_meta in image_metas:
if self._is_image_available(context, image_meta):
meta_subset = utils.subset_dict(image_meta, ('id', 'name'))
filtered.append(meta_subset)
return filtered
- def detail(self, context, filters=None):
+ def detail(self, context, filters=None, marker=None, limit=None):
"""Calls out to Glance for a list of detailed image information."""
filtered = []
- image_metas = self.client.get_images_detailed(filters=filters)
+ image_metas = self.client.get_images_detailed(filters=filters,
+ marker=marker,
+ limit=limit)
for image_meta in image_metas:
if self._is_image_available(context, image_meta):
base_image_meta = self._translate_to_base(image_meta)
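
pick_glance_api_server() plus the new client property give a long-lived GlanceImageService a freshly chosen server on each request. A toy version of the selection, with made-up addresses standing in for FLAGS.glance_api_servers:

import random

# Hypothetical values; in nova these come from FLAGS.glance_api_servers.
glance_api_servers = ['127.0.0.1:9292', '192.168.0.5:9292']


def pick_server(servers):
    # Same host:port split as pick_glance_api_server above.
    host, port_str = random.choice(servers).split(':')
    return host, int(port_str)


print(pick_server(glance_api_servers))
# e.g. ('192.168.0.5', 9292), chosen at random on each call
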
diff --git a/nova/image/local.py b/nova/image/local.py
index 677d5302b..c7dee4573 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -63,7 +63,8 @@ class LocalImageService(service.BaseImageService):
images.append(unhexed_image_id)
return images
- def index(self, context, *args, **kwargs):
+ def index(self, context, filters=None, marker=None, limit=None):
+ # TODO(blamar): Make use of filters, marker, and limit
filtered = []
image_metas = self.detail(context)
for image_meta in image_metas:
@@ -71,7 +72,8 @@ class LocalImageService(service.BaseImageService):
filtered.append(meta)
return filtered
- def detail(self, context, *args, **kwargs):
+ def detail(self, context, filters=None, marker=None, limit=None):
+ # TODO(blamar): Make use of filters, marker, and limit
images = []
for image_id in self._ids():
try:
diff --git a/nova/image/s3.py b/nova/image/s3.py
index ec8852f09..9e95bd698 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -31,6 +31,7 @@ import eventlet
from nova import crypto
from nova import exception
from nova import flags
+from nova import image
from nova import log as logging
from nova import utils
from nova.auth import manager
@@ -48,9 +49,7 @@ class S3ImageService(service.BaseImageService):
"""Wraps an existing image service to support s3 based register."""
def __init__(self, service=None, *args, **kwargs):
- if service is None:
- service = utils.import_object(FLAGS.image_service)
- self.service = service
+ self.service = service or image.get_default_image_service()
self.service.__init__(*args, **kwargs)
def create(self, context, metadata, data=None):
diff --git a/nova/log.py b/nova/log.py
index 096279f7c..6909916a1 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -35,6 +35,7 @@ import os
import sys
import traceback
+import nova
from nova import flags
from nova import version
@@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels',
'eventlet.wsgi.server=WARN'],
'list of logger=LEVEL pairs')
flags.DEFINE_bool('use_syslog', False, 'output to syslog')
+flags.DEFINE_bool('publish_errors', False, 'publish error events')
flags.DEFINE_string('logfile', None, 'output to named file')
@@ -258,12 +260,20 @@ class NovaRootLogger(NovaLogger):
else:
self.removeHandler(self.filelog)
self.addHandler(self.streamlog)
+ if FLAGS.publish_errors:
+ self.addHandler(PublishErrorsHandler(ERROR))
if FLAGS.verbose:
self.setLevel(DEBUG)
else:
self.setLevel(INFO)
+class PublishErrorsHandler(logging.Handler):
+ def emit(self, record):
+ nova.notifier.api.notify('nova.error.publisher', 'error_notification',
+ nova.notifier.api.ERROR, dict(error=record.msg))
+
+
def handle_exception(type, value, tb):
extra = {}
if FLAGS.verbose:
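
PublishErrorsHandler is just a logging.Handler whose emit() forwards the record to the notifier. The same shape, shown with a stand-in notify() so it runs on its own:

import logging


def notify(publisher_id, event_type, priority, payload):
    # Stand-in for nova.notifier.api.notify, for illustration only.
    print('%s %s %s %s' % (publisher_id, event_type, priority, payload))


class PublishErrorsHandler(logging.Handler):
    def emit(self, record):
        notify('nova.error.publisher', 'error_notification', 'ERROR',
               dict(error=record.msg))


log = logging.getLogger('example')
log.addHandler(PublishErrorsHandler(logging.ERROR))
log.error('something broke')
# -> nova.error.publisher error_notification ERROR {'error': 'something broke'}
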
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 5a6fdde5a..b5352ca0f 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -86,6 +86,7 @@ flags.DEFINE_string('floating_range', '4.4.4.0/24',
'Floating IP address block')
flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block')
flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block')
+flags.DEFINE_string('gateway_v6', None, 'Default IPv6 gateway')
flags.DEFINE_integer('cnt_vpn_clients', 0,
'Number of addresses reserved for vpn clients')
flags.DEFINE_string('network_driver', 'nova.network.linux_net',
@@ -235,7 +236,7 @@ class NetworkManager(manager.SchedulerDependentManager):
inst_addr = instance_ref['mac_address']
raise exception.Error(_('IP %(address)s leased to bad mac'
' %(inst_addr)s vs %(mac)s') % locals())
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': True,
@@ -292,7 +293,7 @@ class NetworkManager(manager.SchedulerDependentManager):
return host
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, label, *args, **kwargs):
+ cidr_v6, gateway_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
fixed_net = IPy.IP(cidr)
fixed_net_v6 = IPy.IP(cidr_v6)
@@ -324,7 +325,13 @@ class NetworkManager(manager.SchedulerDependentManager):
significant_bits_v6)
net['cidr_v6'] = cidr_v6
project_net_v6 = IPy.IP(cidr_v6)
- net['gateway_v6'] = str(project_net_v6[1])
+
+ if gateway_v6:
+ # use a pre-defined gateway if one is provided
+ net['gateway_v6'] = str(gateway_v6)
+ else:
+ net['gateway_v6'] = str(project_net_v6[1])
+
net['netmask_v6'] = str(project_net_v6.prefixlen())
network_ref = self.db.network_create_safe(context, net)
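
The gateway_v6 handling above boils down to: use the operator-supplied gateway when the new flag is set, otherwise keep deriving it from the address at index 1 of the project's v6 network (the first address after the network address). Isolated below, using the same IPy indexing the manager uses; IPy is a third-party dependency nova already pulls in:

import IPy


def pick_gateway_v6(cidr_v6, gateway_v6=None):
    # Prefer an explicitly configured gateway; otherwise fall back to the
    # first host address of the network, as the code above does.
    if gateway_v6:
        return str(gateway_v6)
    return str(IPy.IP(cidr_v6)[1])


print(pick_gateway_v6('fd00::/48'))              # derived from the CIDR
print(pick_gateway_v6('fd00::/48', 'fd00::1'))   # taken from the flag
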
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index a3e7a039e..d49517c8b 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -11,9 +11,8 @@
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
-# under the License.import datetime
+# under the License.
-import datetime
import uuid
from nova import flags
@@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload):
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
- 'timestamp': datetime.datetime.utcnow(),
+ 'timestamp': utils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
@@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload):
event_type=event_type,
priority=priority,
payload=payload,
- timestamp=str(datetime.datetime.utcnow()))
+ timestamp=str(utils.utcnow()))
driver.notify(msg)
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
index dd6327c8f..76025a1e3 100644
--- a/nova/objectstore/s3server.py
+++ b/nova/objectstore/s3server.py
@@ -81,7 +81,7 @@ class S3Application(wsgi.Router):
super(S3Application, self).__init__(mapper)
-class BaseRequestHandler(wsgi.Controller):
+class BaseRequestHandler(object):
"""Base class emulating Tornado's web framework pattern in WSGI.
This is a direct port of Tornado's implementation, so some key decisions
diff --git a/nova/rpc.py b/nova/rpc.py
index c5277c6a9..2e78a31e7 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection):
if new or not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
port=FLAGS.rabbit_port,
+ ssl=FLAGS.rabbit_use_ssl,
userid=FLAGS.rabbit_userid,
password=FLAGS.rabbit_password,
virtual_host=FLAGS.rabbit_virtual_host)
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 55f8e0a6d..3b3195c2e 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -84,7 +84,7 @@ def get_zone_capabilities(context):
def select(context, specs=None):
"""Returns a list of hosts."""
return _call_scheduler('select', context=context,
- params={"specs": specs})
+ params={"request_spec": specs})
def update_service_capabilities(context, service_name, host, capabilities):
@@ -106,12 +106,14 @@ def _wrap_method(function, self):
def _process(func, zone):
"""Worker stub for green thread pool. Give the worker
an authenticated nova client and zone info."""
- nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ nova = novaclient.OpenStack(zone.username, zone.password, None,
+ zone.api_url)
nova.authenticate()
return func(nova, zone)
-def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
+def call_zone_method(context, method_name, errors_to_ignore=None,
+ novaclient_collection_name='zones', *args, **kwargs):
"""Returns a list of (zone, call_result) objects."""
if not isinstance(errors_to_ignore, (list, tuple)):
# This will also handle the default None
@@ -121,7 +123,7 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
results = []
for zone in db.zone_get_all(context):
try:
- nova = novaclient.OpenStack(zone.username, zone.password,
+ nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
nova.authenticate()
except novaclient.exceptions.BadRequest, e:
@@ -131,18 +133,16 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
#TODO (dabo) - add logic for failure counts per zone,
# with escalation after a given number of failures.
continue
- zone_method = getattr(nova.zones, method)
+ novaclient_collection = getattr(nova, novaclient_collection_name)
+ collection_method = getattr(novaclient_collection, method_name)
def _error_trap(*args, **kwargs):
try:
- return zone_method(*args, **kwargs)
+ return collection_method(*args, **kwargs)
except Exception as e:
if type(e) in errors_to_ignore:
return None
- # TODO (dabo) - want to be able to re-raise here.
- # Returning a string now; raising was causing issues.
- # raise e
- return "ERROR", "%s" % e
+ raise
res = pool.spawn(_error_trap, *args, **kwargs)
results.append((zone, res))
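
call_zone_method() now resolves a named novaclient collection and then a method on it, instead of hard-coding nova.zones. The dispatch itself is just two getattr() calls, sketched here against fake stand-in client classes:

class FakeZonesCollection(object):
    # Stand-in for novaclient's `zones` collection.
    def select(self, *args, **kwargs):
        return [{'weight': 1.0, 'blob': 'opaque'}]


class FakeNovaClient(object):
    # Stand-in for novaclient.OpenStack; only attribute lookup matters here.
    zones = FakeZonesCollection()


def call_collection_method(client, method_name,
                           novaclient_collection_name='zones',
                           *args, **kwargs):
    collection = getattr(client, novaclient_collection_name)
    collection_method = getattr(collection, method_name)
    return collection_method(*args, **kwargs)


print(call_collection_method(FakeNovaClient(), 'select'))
# -> e.g. [{'weight': 1.0, 'blob': 'opaque'}]
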
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 2094e3565..0b257c5d8 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -28,6 +28,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
+from nova import utils
from nova.compute import power_state
FLAGS = flags.FLAGS
@@ -61,7 +62,7 @@ class Scheduler(object):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
- elapsed = datetime.datetime.utcnow() - last_heartbeat
+ elapsed = utils.utcnow() - last_heartbeat
return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time)
def hosts_up(self, context, topic):
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index 4260cbf42..bd6b26608 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -41,6 +41,7 @@ import json
from nova import exception
from nova import flags
from nova import log as logging
+from nova.scheduler import zone_aware_scheduler
from nova import utils
from nova.scheduler import zone_aware_scheduler
@@ -226,7 +227,7 @@ class JsonFilter(HostFilter):
required_disk = instance_type['local_gb']
query = ['and',
['>=', '$compute.host_memory_free', required_ram],
- ['>=', '$compute.disk_available', required_disk]
+ ['>=', '$compute.disk_available', required_disk],
]
return (self._full_name(), json.dumps(query))
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
new file mode 100644
index 000000000..629fe2e42
--- /dev/null
+++ b/nova/scheduler/least_cost.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Least Cost Scheduler is a mechanism for choosing which host machines to
+provision a set of resources to. The input of the least-cost-scheduler is a
+set of objective-functions, called the 'cost-functions', a weight for each
+cost-function, and a list of candidate hosts (gathered via FilterHosts).
+
+The cost-function and weights are tabulated, and the host with the least cost
+is then selected for provisioning.
+"""
+
+import collections
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.scheduler import zone_aware_scheduler
+from nova import utils
+
+LOG = logging.getLogger('nova.scheduler.least_cost')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_list('least_cost_scheduler_cost_functions',
+ ['nova.scheduler.least_cost.noop_cost_fn'],
+ 'Which cost functions the LeastCostScheduler should use.')
+
+
+# TODO(sirp): Once we have enough of these rules, we can break them out into a
+# cost_functions.py file (perhaps in a least_cost_scheduler directory)
+flags.DEFINE_integer('noop_cost_fn_weight', 1,
+ 'How much weight to give the noop cost function')
+
+
+def noop_cost_fn(host):
+ """Return a pre-weight cost of 1 for each host"""
+ return 1
+
+
+flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+ 'How much weight to give the fill-first cost function')
+
+
+def fill_first_cost_fn(host):
+    """Prefer hosts that have less RAM available; filter_hosts will exclude
+    hosts that don't have enough RAM."""
+ hostname, caps = host
+ free_mem = caps['compute']['host_memory_free']
+ return free_mem
+
+
+class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+ def get_cost_fns(self):
+ """Returns a list of tuples containing weights and cost functions to
+ use for weighing hosts
+ """
+ cost_fns = []
+ for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+
+ try:
+                # NOTE(sirp): import_class is somewhat misnamed since it can
+                # import any callable from a module
+ cost_fn = utils.import_class(cost_fn_str)
+ except exception.ClassNotFound:
+ raise exception.SchedulerCostFunctionNotFound(
+ cost_fn_str=cost_fn_str)
+
+ try:
+                flag_name = "%s_weight" % cost_fn.__name__
+                weight = getattr(FLAGS, flag_name)
+            except AttributeError:
+                raise exception.SchedulerWeightFlagNotFound(
+                    flag_name=flag_name)
+
+ cost_fns.append((weight, cost_fn))
+
+ return cost_fns
+
+ def weigh_hosts(self, num, request_spec, hosts):
+ """Returns a list of dictionaries of form:
+ [ {weight: weight, hostname: hostname} ]"""
+
+ # FIXME(sirp): weigh_hosts should handle more than just instances
+ hostnames = [hostname for hostname, caps in hosts]
+
+ cost_fns = self.get_cost_fns()
+ costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
+
+ weighted = []
+ weight_log = []
+ for cost, hostname in zip(costs, hostnames):
+ weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
+ weight_dict = dict(weight=cost, hostname=hostname)
+ weighted.append(weight_dict)
+
+ LOG.debug(_("Weighted Costs => %s") % weight_log)
+ return weighted
+
+
+def normalize_list(L):
+ """Normalize an array of numbers such that each element satisfies:
+ 0 <= e <= 1"""
+ if not L:
+ return L
+ max_ = max(L)
+ if max_ > 0:
+ return [(float(e) / max_) for e in L]
+ return L
+
+
+def weighted_sum(domain, weighted_fns, normalize=True):
+ """Use the weighted-sum method to compute a score for an array of objects.
+ Normalize the results of the objective-functions so that the weights are
+ meaningful regardless of objective-function's range.
+
+ domain - input to be scored
+ weighted_fns - list of weights and functions like:
+ [(weight, objective-functions)]
+
+    Returns an unsorted list of scores. To pair with hosts, do: zip(scores, hosts)
+ """
+ # Table of form:
+ # { domain1: [score1, score2, ..., scoreM]
+ # ...
+ # domainN: [score1, score2, ..., scoreM] }
+ score_table = collections.defaultdict(list)
+ for weight, fn in weighted_fns:
+ scores = [fn(elem) for elem in domain]
+
+ if normalize:
+ norm_scores = normalize_list(scores)
+ else:
+ norm_scores = scores
+
+ for idx, score in enumerate(norm_scores):
+ weighted_score = score * weight
+ score_table[idx].append(weighted_score)
+
+ # Sum rows in table to compute score for each element in domain
+ domain_scores = []
+ for idx in sorted(score_table):
+ elem_score = sum(score_table[idx])
+ elem = domain[idx]
+ domain_scores.append(elem_score)
+
+ return domain_scores
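
To make the tabulation above concrete, here is a condensed, runnable restatement of normalize_list()/weighted_sum() applied to two hypothetical hosts, using the noop and fill-first cost functions with equal weights:

import collections


def normalize(scores):
    m = max(scores)
    return [float(s) / m for s in scores] if m > 0 else scores


def weighted_sum(domain, weighted_fns):
    table = collections.defaultdict(list)
    for weight, fn in weighted_fns:
        for idx, score in enumerate(normalize([fn(elem) for elem in domain])):
            table[idx].append(score * weight)
    return [sum(table[idx]) for idx in sorted(table)]


hosts = [('host1', {'compute': {'host_memory_free': 2048}}),
         ('host2', {'compute': {'host_memory_free': 512}})]
noop_cost_fn = lambda host: 1
fill_first_cost_fn = lambda host: host[1]['compute']['host_memory_free']

print(weighted_sum(hosts, [(1, noop_cost_fn), (1, fill_first_cost_fn)]))
# -> [2.0, 1.25]: host2 has the lower cost, so the scheduler fills it first.
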
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index bd40e73c0..6cb75aa8d 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -70,6 +70,14 @@ class SchedulerManager(manager.Manager):
self.zone_manager.update_service_capabilities(service_name,
host, capabilities)
+ def select(self, context=None, *args, **kwargs):
+ """Select a list of hosts best matching the provided specs."""
+ return self.driver.select(context, *args, **kwargs)
+
+ def get_scheduler_rules(self, context=None, *args, **kwargs):
+ """Ask the driver how requests should be made of it."""
+ return self.driver.get_scheduler_rules(context, *args, **kwargs)
+
def _schedule(self, method, context, topic, *args, **kwargs):
"""Tries to call schedule_* method on the driver to retrieve host.
@@ -80,7 +88,9 @@ class SchedulerManager(manager.Manager):
try:
host = getattr(self.driver, driver_method)(elevated, *args,
**kwargs)
- except AttributeError:
+ except AttributeError, e:
+            LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s. "
+                          "Reverting to schedule()") % locals())
host = self.driver.schedule(elevated, topic, *args, **kwargs)
if not host:
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index dd568d2c6..87cdef11d 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -21,10 +21,9 @@
Simple Scheduler
"""
-import datetime
-
from nova import db
from nova import flags
+from nova import utils
from nova.scheduler import driver
from nova.scheduler import chance
@@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler):
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
db.instance_update(context, instance_id, {'host': host,
'scheduled_at': now})
return host
@@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler):
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
db.instance_update(context,
instance_id,
{'host': service['host'],
@@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler):
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
db.volume_update(context, volume_id, {'host': host,
'scheduled_at': now})
return host
@@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler):
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
db.volume_update(context,
volume_id,
{'host': service['host'],
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index bc67c7794..e7bff2faa 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -21,16 +21,30 @@ across zones. There are two expansion points to this class for:
"""
import operator
+import json
+import M2Crypto
+import novaclient
+
+from nova import crypto
from nova import db
+from nova import exception
+from nova import flags
from nova import log as logging
from nova import rpc
+
from nova.scheduler import api
from nova.scheduler import driver
+FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler')
+class InvalidBlob(exception.NovaException):
+ message = _("Ill-formed or incorrectly routed 'blob' data sent "
+ "to instance create request.")
+
+
class ZoneAwareScheduler(driver.Scheduler):
"""Base class for creating Zone Aware Schedulers."""
@@ -38,8 +52,117 @@ class ZoneAwareScheduler(driver.Scheduler):
"""Call novaclient zone method. Broken out for testing."""
return api.call_zone_method(context, method, specs=specs)
+ def _provision_resource_locally(self, context, item, instance_id, kwargs):
+ """Create the requested resource in this Zone."""
+ host = item['hostname']
+ kwargs['instance_id'] = instance_id
+ rpc.cast(context,
+ db.queue_get_for(context, "compute", host),
+ {"method": "run_instance",
+ "args": kwargs})
+ LOG.debug(_("Provisioning locally via compute node %(host)s")
+ % locals())
+
+ def _decrypt_blob(self, blob):
+        """Returns the decrypted blob as a dict, or None if invalid. Broken
+        out for testing."""
+ decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
+ try:
+ json_entry = decryptor(blob)
+            return json.loads(json_entry)
+ except M2Crypto.EVP.EVPError:
+ pass
+ return None
+
+ def _ask_child_zone_to_create_instance(self, context, zone_info,
+ request_spec, kwargs):
+ """Once we have determined that the request should go to one
+ of our children, we need to fabricate a new POST /servers/
+ call with the same parameters that were passed into us.
+
+ Note that we have to reverse engineer from our args to get back the
+ image, flavor, ipgroup, etc. since the original call could have
+ come in from EC2 (which doesn't use these things)."""
+
+ instance_type = request_spec['instance_type']
+ instance_properties = request_spec['instance_properties']
+
+ name = instance_properties['display_name']
+ image_ref = instance_properties['image_ref']
+ meta = instance_properties['metadata']
+ flavor_id = instance_type['flavorid']
+ reservation_id = instance_properties['reservation_id']
+
+ files = kwargs['injected_files']
+ ipgroup = None # Not supported in OS API ... yet
+
+ child_zone = zone_info['child_zone']
+ child_blob = zone_info['child_blob']
+ zone = db.zone_get(context, child_zone)
+ url = zone.api_url
+ LOG.debug(_("Forwarding instance create call to child zone %(url)s"
+ ". ReservationID=%(reservation_id)s")
+ % locals())
+ nova = None
+ try:
+ nova = novaclient.OpenStack(zone.username, zone.password, None,
+ url)
+ nova.authenticate()
+ except novaclient.exceptions.BadRequest, e:
+ raise exception.NotAuthorized(_("Bad credentials attempting "
+ "to talk to zone at %(url)s.") % locals())
+
+ nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
+ child_blob, reservation_id=reservation_id)
+
+ def _provision_resource_from_blob(self, context, item, instance_id,
+ request_spec, kwargs):
+ """Create the requested resource locally or in a child zone
+ based on what is stored in the zone blob info.
+
+ Attempt to decrypt the blob to see if this request is:
+ 1. valid, and
+ 2. intended for this zone or a child zone.
+
+ Note: If we have "blob" that means the request was passed
+ into us from a parent zone. If we have "child_blob" that
+ means we gathered the info from one of our children.
+ It's possible that, when we decrypt the 'blob' field, it
+        contains "child_blob" data, in which case we forward the
+        request."""
+
+ host_info = None
+ if "blob" in item:
+ # Request was passed in from above. Is it for us?
+ host_info = self._decrypt_blob(item['blob'])
+ elif "child_blob" in item:
+ # Our immediate child zone provided this info ...
+ host_info = item
+
+ if not host_info:
+ raise InvalidBlob()
+
+ # Valid data ... is it for us?
+ if 'child_zone' in host_info and 'child_blob' in host_info:
+ self._ask_child_zone_to_create_instance(context, host_info,
+ request_spec, kwargs)
+ else:
+ self._provision_resource_locally(context, host_info,
+ instance_id, kwargs)
+
+ def _provision_resource(self, context, item, instance_id, request_spec,
+ kwargs):
+ """Create the requested resource in this Zone or a child zone."""
+ if "hostname" in item:
+ self._provision_resource_locally(context, item, instance_id,
+ kwargs)
+ return
+
+ self._provision_resource_from_blob(context, item, instance_id,
+ request_spec, kwargs)
+
def schedule_run_instance(self, context, instance_id, request_spec,
- *args, **kwargs):
+ *args, **kwargs):
"""This method is called from nova.compute.api to provision
an instance. However we need to look at the parameters being
passed in to see if this is a request to:
@@ -51,8 +174,10 @@ class ZoneAwareScheduler(driver.Scheduler):
# TODO(sandy): We'll have to look for richer specs at some point.
- if 'blob' in request_spec:
- self.provision_resource(context, request_spec, instance_id, kwargs)
+ blob = request_spec.get('blob')
+ if blob:
+ self._provision_resource(context, request_spec, instance_id,
+ request_spec, kwargs)
return None
# Create build plan and provision ...
@@ -60,29 +185,18 @@ class ZoneAwareScheduler(driver.Scheduler):
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
- for item in build_plan:
- self.provision_resource(context, item, instance_id, kwargs)
+ for num in xrange(request_spec['num_instances']):
+ if not build_plan:
+ break
+
+ item = build_plan.pop(0)
+ self._provision_resource(context, item, instance_id, request_spec,
+ kwargs)
# Returning None short-circuits the routing to Compute (since
# we've already done it here)
return None
- def provision_resource(self, context, item, instance_id, kwargs):
- """Create the requested resource in this Zone or a child zone."""
- if "hostname" in item:
- host = item['hostname']
- kwargs['instance_id'] = instance_id
- rpc.cast(context,
- db.queue_get_for(context, "compute", host),
- {"method": "run_instance",
- "args": kwargs})
- LOG.debug(_("Casted to compute %(host)s for run_instance")
- % locals())
- else:
- # TODO(sandy) Provision in child zone ...
- LOG.warning(_("Provision to Child Zone not supported (yet)"))
- pass
-
def select(self, context, request_spec, *args, **kwargs):
"""Select returns a list of weights and zone/host information
corresponding to the best hosts to service the request. Any
@@ -116,22 +230,25 @@ class ZoneAwareScheduler(driver.Scheduler):
# Filter local hosts based on requirements ...
host_list = self.filter_hosts(num_instances, request_spec)
+ # TODO(sirp): weigh_hosts should also be a function of 'topic' or
+ # resources, so that we can apply different objective functions to it
+
# then weigh the selected hosts.
# weighted = [{weight=weight, name=hostname}, ...]
weighted = self.weigh_hosts(num_instances, request_spec, host_list)
# Next, tack on the best weights from the child zones ...
+ json_spec = json.dumps(request_spec)
child_results = self._call_zone_method(context, "select",
- specs=request_spec)
+ specs=json_spec)
for child_zone, result in child_results:
for weighting in result:
# Remember the child_zone so we can get back to
# it later if needed. This implicitly builds a zone
# path structure.
- host_dict = {
- "weight": weighting["weight"],
- "child_zone": child_zone,
- "child_blob": weighting["blob"]}
+ host_dict = {"weight": weighting["weight"],
+ "child_zone": child_zone,
+ "child_blob": weighting["blob"]}
weighted.append(host_dict)
weighted.sort(key=operator.itemgetter('weight'))
@@ -139,12 +256,16 @@ class ZoneAwareScheduler(driver.Scheduler):
def filter_hosts(self, num, request_spec):
"""Derived classes must override this method and return
- a list of hosts in [(hostname, capability_dict)] format.
+ a list of hosts in [(hostname, capability_dict)] format.
"""
- raise NotImplemented()
+ # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
+ service_states = self.zone_manager.service_states
+ return [(host, services)
+ for host, services in service_states.iteritems()]
def weigh_hosts(self, num, request_spec, hosts):
- """Derived classes must override this method and return
- a lists of hosts in [{weight, hostname}] format.
+ """Derived classes may override this to provide more sophisticated
+        scheduling objectives.
"""
- raise NotImplemented()
+ # NOTE(sirp): The default logic is the same as the NoopCostFunction
+ return [dict(weight=1, hostname=host) for host, caps in hosts]
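
After weigh_hosts() runs locally, select() appends whatever the child zones reported and sorts everything ascending by weight; entries that came from a child carry the zone id and its encrypted blob instead of a hostname. The merge/sort step on its own, with made-up weights and a hypothetical child zone id:

import operator

# Locally weighted hosts, as weigh_hosts() returns them ...
weighted = [{'weight': 2.0, 'hostname': 'host1'},
            {'weight': 1.25, 'hostname': 'host2'}]

# ... plus results from a hypothetical child zone (id 42).
child_results = [(42, [{'weight': 0.9, 'blob': 'encrypted-build-plan'}])]
for child_zone, result in child_results:
    for weighting in result:
        weighted.append({'weight': weighting['weight'],
                         'child_zone': child_zone,
                         'child_blob': weighting['blob']})

weighted.sort(key=operator.itemgetter('weight'))
print([entry['weight'] for entry in weighted])
# -> [0.9, 1.25, 2.0]; the child-zone offer is now the cheapest candidate.
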
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index 3ddf6f3c3..ba7403c15 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -17,16 +17,17 @@
ZoneManager oversees all communications with child Zones.
"""
+import datetime
import novaclient
import thread
import traceback
-from datetime import datetime
from eventlet import greenpool
from nova import db
from nova import flags
from nova import log as logging
+from nova import utils
FLAGS = flags.FLAGS
flags.DEFINE_integer('zone_db_check_interval', 60,
@@ -42,7 +43,7 @@ class ZoneState(object):
self.name = None
self.capabilities = None
self.attempt = 0
- self.last_seen = datetime.min
+ self.last_seen = datetime.datetime.min
self.last_exception = None
self.last_exception_time = None
@@ -56,7 +57,7 @@ class ZoneState(object):
def update_metadata(self, zone_metadata):
"""Update zone metadata after successful communications with
child zone."""
- self.last_seen = datetime.now()
+ self.last_seen = utils.utcnow()
self.attempt = 0
self.name = zone_metadata.get("name", "n/a")
self.capabilities = ", ".join(["%s=%s" % (k, v)
@@ -72,7 +73,7 @@ class ZoneState(object):
"""Something went wrong. Check to see if zone should be
marked as offline."""
self.last_exception = exception
- self.last_exception_time = datetime.now()
+ self.last_exception_time = utils.utcnow()
api_url = self.api_url
logging.warning(_("'%(exception)s' error talking to "
"zone %(api_url)s") % locals())
@@ -88,7 +89,8 @@ class ZoneState(object):
def _call_novaclient(zone):
"""Call novaclient. Broken out for testing purposes."""
- client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ client = novaclient.OpenStack(zone.username, zone.password, None,
+ zone.api_url)
return client.zones.info()._info
@@ -104,7 +106,7 @@ def _poll_zone(zone):
class ZoneManager(object):
"""Keeps the zone states updated."""
def __init__(self):
- self.last_zone_db_check = datetime.min
+ self.last_zone_db_check = datetime.datetime.min
self.zone_states = {} # { <zone_id> : ZoneState }
self.service_states = {} # { <host> : { <service> : { cap k : v }}}
self.green_pool = greenpool.GreenPool()
@@ -158,10 +160,10 @@ class ZoneManager(object):
def ping(self, context=None):
"""Ping should be called periodically to update zone status."""
- diff = datetime.now() - self.last_zone_db_check
+ diff = utils.utcnow() - self.last_zone_db_check
if diff.seconds >= FLAGS.zone_db_check_interval:
logging.debug(_("Updating zone cache from db."))
- self.last_zone_db_check = datetime.now()
+ self.last_zone_db_check = utils.utcnow()
self._refresh_from_db(context)
self._poll_zones(context)
diff --git a/nova/test.py b/nova/test.py
index 80b2d0a74..4a0a18fe7 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -23,7 +23,6 @@ inline callbacks.
"""
-import datetime
import functools
import os
import shutil
@@ -37,6 +36,7 @@ from eventlet import greenthread
from nova import fakerabbit
from nova import flags
from nova import rpc
+from nova import utils
from nova import service
from nova import wsgi
from nova.virt import fake
@@ -69,7 +69,7 @@ class TestCase(unittest.TestCase):
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
- self.start = datetime.datetime.utcnow()
+ self.start = utils.utcnow()
shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
@@ -184,7 +184,7 @@ class TestCase(unittest.TestCase):
wsgi.Server.start = _wrapped_start
# Useful assertions
- def assertDictMatch(self, d1, d2):
+ def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
@@ -215,15 +215,26 @@ class TestCase(unittest.TestCase):
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
+ try:
+ error = abs(float(d1value) - float(d2value))
+ within_tolerance = error <= tolerance
+ except (ValueError, TypeError):
+                # If both values aren't convertible to float, just ignore
+ # ValueError if arg is a str, TypeError if it's something else
+ # (like None)
+ within_tolerance = False
+
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
+ elif approx_equal and within_tolerance:
+ continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" % locals())
- def assertDictListMatch(self, L1, L2):
+ def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
@@ -239,4 +250,5 @@ class TestCase(unittest.TestCase):
'len(L2)=%(L2count)d' % locals())
for d1, d2 in zip(L1, L2):
- self.assertDictMatch(d1, d2)
+ self.assertDictMatch(d1, d2, approx_equal=approx_equal,
+ tolerance=tolerance)
diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py
index dbdd0928a..03aad007a 100644
--- a/nova/tests/api/openstack/extensions/foxinsocks.py
+++ b/nova/tests/api/openstack/extensions/foxinsocks.py
@@ -17,12 +17,10 @@
import json
-from nova import wsgi
-
from nova.api.openstack import extensions
-class FoxInSocksController(wsgi.Controller):
+class FoxInSocksController(object):
def index(self, req):
return "Try to say this Mr. Knox, sir..."
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 8e0156afa..a10fb7433 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -16,7 +16,6 @@
# under the License.
import copy
-import datetime
import json
import random
import string
@@ -38,6 +37,7 @@ from nova.api.openstack import auth
from nova.api.openstack import versions
from nova.api.openstack import limits
from nova.auth.manager import User, Project
+import nova.image.fake
from nova.image import glance
from nova.image import local
from nova.image import service
@@ -104,10 +104,12 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True):
def stub_out_image_service(stubs):
- def fake_image_show(meh, context, id):
- return dict(kernelId=1, ramdiskId=1)
-
- stubs.Set(local.LocalImageService, 'show', fake_image_show)
+ def fake_get_image_service(image_href):
+ image_id = int(str(image_href).split('/')[-1])
+ return (nova.image.fake.FakeImageService(), image_id)
+ stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
+ stubs.Set(nova.image, 'get_default_image_service',
+ lambda: nova.image.fake.FakeImageService())
def stub_out_auth(stubs):
@@ -140,7 +142,8 @@ def stub_out_networking(stubs):
def stub_out_compute_api_snapshot(stubs):
def snapshot(self, context, instance_id, name):
- return 123
+ return dict(id='123', status='ACTIVE',
+ properties=dict(instance_id='123'))
stubs.Set(nova.compute.API, 'snapshot', snapshot)
@@ -166,12 +169,34 @@ def stub_out_glance(stubs, initial_fixtures=None):
def __init__(self, initial_fixtures):
self.fixtures = initial_fixtures or []
- def fake_get_images(self, filters=None):
+ def _filter_images(self, filters=None, marker=None, limit=None):
+ found = True
+ if marker:
+ found = False
+ if limit == 0:
+ limit = None
+
+ fixtures = []
+ count = 0
+ for f in self.fixtures:
+ if limit and count >= limit:
+ break
+ if found:
+ fixtures.append(f)
+ count = count + 1
+ if f['id'] == marker:
+ found = True
+
+ return fixtures
+
+ def fake_get_images(self, filters=None, marker=None, limit=None):
+ fixtures = self._filter_images(filters, marker, limit)
return [dict(id=f['id'], name=f['name'])
- for f in self.fixtures]
+ for f in fixtures]
- def fake_get_images_detailed(self, filters=None):
- return copy.deepcopy(self.fixtures)
+ def fake_get_images_detailed(self, filters=None,
+ marker=None, limit=None):
+ return self._filter_images(filters, marker, limit)
def fake_get_image_meta(self, image_id):
image = self._find_image(image_id)
@@ -208,7 +233,7 @@ def stub_out_glance(stubs, initial_fixtures=None):
def _find_image(self, image_id):
for f in self.fixtures:
- if f['id'] == image_id:
+ if str(f['id']) == str(image_id):
return f
return None
@@ -253,7 +278,7 @@ class FakeAuthDatabase(object):
@staticmethod
def auth_token_create(context, token):
- fake_token = FakeToken(created_at=datetime.datetime.now(), **token)
+ fake_token = FakeToken(created_at=utils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@@ -328,6 +353,11 @@ class FakeAuthManager(object):
return user.admin
def is_project_member(self, user, project):
+ if not isinstance(project, Project):
+ try:
+ project = self.get_project(project)
+ except exc.NotFound:
+ raise webob.exc.HTTPUnauthorized()
return ((user.id in project.member_ids) or
(user.id == project.project_manager_id))
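
The _filter_images() helper above gives the stubbed Glance client marker/limit pagination: skip everything up to and including the marker, then return at most `limit` entries. The same idea on a plain list:

def filter_after_marker(items, marker=None, limit=None):
    # Mirrors _filter_images: items strictly after the marker, capped at limit.
    found = marker is None
    out = []
    for item in items:
        if limit and len(out) >= limit:
            break
        if found:
            out.append(item)
        if item == marker:
            found = True
    return out


print(filter_after_marker(['a', 'b', 'c', 'd'], marker='b', limit=2))
# -> ['c', 'd']
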
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
index c63431a45..7321c329f 100644
--- a/nova/tests/api/openstack/test_api.py
+++ b/nova/tests/api/openstack/test_api.py
@@ -15,6 +15,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
+
import webob.exc
import webob.dec
@@ -23,6 +25,7 @@ from webob import Request
from nova import test
from nova.api import openstack
from nova.api.openstack import faults
+from nova.tests.api.openstack import fakes
class APITest(test.TestCase):
@@ -31,6 +34,24 @@ class APITest(test.TestCase):
# simpler version of the app than fakes.wsgi_app
return openstack.FaultWrapper(inner_app)
+ def test_malformed_json(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '{'
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_malformed_xml(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '<hi im not xml>'
+ req.headers["content-type"] = "application/xml"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_exceptions_are_converted_to_faults(self):
@webob.dec.wsgify
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index 8f189c744..af3478c7d 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -114,6 +114,28 @@ class Test(test.TestCase):
self.assertEqual(result.status, '401 Unauthorized')
self.assertEqual(self.destroy_called, True)
+ def test_authorize_project(self):
+ f = fakes.FakeAuthManager()
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
+ f.create_project('user1_project', user)
+ f.create_project('user2_project', user)
+
+ req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '204 No Content')
+
+ token = result.headers['X-Auth-Token']
+ self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
+ req = webob.Request.blank('/v1.0/fake')
+ req.headers['X-Auth-Token'] = token
+ req.headers['X-Auth-Project-Id'] = 'user2_project'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '200 OK')
+ self.assertEqual(result.headers['X-Test-Success'], 'True')
+
def test_bad_user_bad_key(self):
req = webob.Request.blank('/v1.0/')
req.headers['X-Auth-User'] = 'unknown_user'
@@ -143,6 +165,49 @@ class Test(test.TestCase):
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
+ def test_bad_project(self):
+ f = fakes.FakeAuthManager()
+ user1 = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ user2 = nova.auth.manager.User('id2', 'user2', 'user2_key', None, None)
+ f.add_user(user1)
+ f.add_user(user2)
+ f.create_project('user1_project', user1)
+ f.create_project('user2_project', user2)
+
+ req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '204 No Content')
+
+ token = result.headers['X-Auth-Token']
+ self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
+ req = webob.Request.blank('/v1.0/fake')
+ req.headers['X-Auth-Token'] = token
+ req.headers['X-Auth-Project-Id'] = 'user2_project'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_not_existing_project(self):
+ f = fakes.FakeAuthManager()
+ user1 = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user1)
+ f.create_project('user1_project', user1)
+
+ req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '204 No Content')
+
+ token = result.headers['X-Auth-Token']
+ self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
+ req = webob.Request.blank('/v1.0/fake')
+ req.headers['X-Auth-Token'] = token
+ req.headers['X-Auth-Project-Id'] = 'unknown_project'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
class TestFunctional(test.TestCase):
def test_token_expiry(self):
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 8f57c5b67..9a9d9125c 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -24,7 +24,7 @@ import webob.exc
from webob import Request
from nova import test
-from nova.api.openstack.common import limited
+from nova.api.openstack import common
class LimiterTest(test.TestCase):
@@ -35,9 +35,7 @@ class LimiterTest(test.TestCase):
"""
def setUp(self):
- """
- Run before each test.
- """
+ """ Run before each test. """
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
@@ -45,127 +43,144 @@ class LimiterTest(test.TestCase):
self.large = range(10000)
def test_limiter_offset_zero(self):
- """
- Test offset key works with 0.
- """
+ """ Test offset key works with 0. """
req = Request.blank('/?offset=0')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
- """
- Test offset key works with a medium sized number.
- """
+ """ Test offset key works with a medium sized number. """
req = Request.blank('/?offset=10')
- self.assertEqual(limited(self.tiny, req), [])
- self.assertEqual(limited(self.small, req), self.small[10:])
- self.assertEqual(limited(self.medium, req), self.medium[10:])
- self.assertEqual(limited(self.large, req), self.large[10:1010])
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), self.small[10:])
+ self.assertEqual(common.limited(self.medium, req), self.medium[10:])
+ self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
- """
- Test offset key works with a number over 1000 (max_limit).
- """
+ """ Test offset key works with a number over 1000 (max_limit). """
req = Request.blank('/?offset=1001')
- self.assertEqual(limited(self.tiny, req), [])
- self.assertEqual(limited(self.small, req), [])
- self.assertEqual(limited(self.medium, req), [])
- self.assertEqual(limited(self.large, req), self.large[1001:2001])
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), [])
+ self.assertEqual(common.limited(self.medium, req), [])
+ self.assertEqual(
+ common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
- """
- Test offset key works with a blank offset.
- """
+ """ Test offset key works with a blank offset. """
req = Request.blank('/?offset=')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
- """
- Test offset key works with a BAD offset.
- """
+ """ Test offset key works with a BAD offset. """
req = Request.blank(u'/?offset=\u0020aa')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
- """
- Test request with no offset or limit
- """
+ """ Test request with no offset or limit """
req = Request.blank('/')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
- """
- Test limit of zero.
- """
+ """ Test limit of zero. """
req = Request.blank('/?limit=0')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
- """
- Test limit of 10.
- """
+ """ Test limit of 10. """
req = Request.blank('/?limit=10')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium[:10])
- self.assertEqual(limited(self.large, req), self.large[:10])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium[:10])
+ self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
- """
- Test limit of 3000.
- """
+ """ Test limit of 3000. """
req = Request.blank('/?limit=3000')
- self.assertEqual(limited(self.tiny, req), self.tiny)
- self.assertEqual(limited(self.small, req), self.small)
- self.assertEqual(limited(self.medium, req), self.medium)
- self.assertEqual(limited(self.large, req), self.large[:1000])
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
- """
- Test request with both limit and offset.
- """
+ """ Test request with both limit and offset. """
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
- self.assertEqual(limited(items, req), items[1:4])
+ self.assertEqual(common.limited(items, req), items[1:4])
req = Request.blank('/?offset=3&limit=0')
- self.assertEqual(limited(items, req), items[3:1003])
+ self.assertEqual(common.limited(items, req), items[3:1003])
req = Request.blank('/?offset=3&limit=1500')
- self.assertEqual(limited(items, req), items[3:1003])
+ self.assertEqual(common.limited(items, req), items[3:1003])
req = Request.blank('/?offset=3000&limit=10')
- self.assertEqual(limited(items, req), [])
+ self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
- """
- Test a max_limit other than 1000.
- """
+ """ Test a max_limit other than 1000. """
items = range(2000)
req = Request.blank('/?offset=1&limit=3')
- self.assertEqual(limited(items, req, max_limit=2000), items[1:4])
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[1:4])
req = Request.blank('/?offset=3&limit=0')
- self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3&limit=2500')
- self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
req = Request.blank('/?offset=3000&limit=10')
- self.assertEqual(limited(items, req, max_limit=2000), [])
+ self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
- """
- Test a negative limit.
- """
+ """ Test a negative limit. """
req = Request.blank('/?limit=-3000')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
- """
- Test a negative offset.
- """
+ """ Test a negative offset. """
req = Request.blank('/?offset=-30')
- self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+
+class PaginationParamsTest(test.TestCase):
+ """
+ Unit tests for the `nova.api.openstack.common.get_pagination_params`
+ method, which takes in a request object and returns the 'marker' and
+ 'limit' GET params.
+ """
+
+ def test_no_params(self):
+ """ Test no params. """
+ req = Request.blank('/')
+ self.assertEqual(common.get_pagination_params(req), (0, 0))
+
+ def test_valid_marker(self):
+ """ Test valid marker param. """
+ req = Request.blank('/?marker=1')
+ self.assertEqual(common.get_pagination_params(req), (1, 0))
+
+ def test_invalid_marker(self):
+ """ Test invalid marker param. """
+ req = Request.blank('/?marker=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit(self):
+ """ Test valid limit param. """
+ req = Request.blank('/?limit=10')
+ self.assertEqual(common.get_pagination_params(req), (0, 10))
+
+ def test_invalid_limit(self):
+ """ Test invalid limit param. """
+ req = Request.blank('/?limit=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
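The PaginationParamsTest cases above pin down the contract the tests expect from get_pagination_params: missing params default to 0, valid integers come back as a (marker, limit) pair, and negative values raise HTTPBadRequest. A minimal sketch satisfying that contract (the real helper in nova/api/openstack/common.py may differ in details):

    import webob.exc


    def get_pagination_params(request):
        """Return (marker, limit) taken from the request's GET params.

        Missing params default to 0; negative or non-integer values raise
        HTTPBadRequest, matching the PaginationParamsTest expectations.
        """
        params = {'marker': 0, 'limit': 0}
        for param in params:
            if param in request.GET:
                try:
                    value = int(request.GET[param])
                except ValueError:
                    raise webob.exc.HTTPBadRequest('%s must be an integer'
                                                   % param)
                if value < 0:
                    raise webob.exc.HTTPBadRequest('%s must not be negative'
                                                   % param)
                params[param] = value
        return params['marker'], params['limit']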
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 544298602..60914c0a3 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -26,15 +26,15 @@ from nova import flags
from nova.api import openstack
from nova.api.openstack import extensions
from nova.api.openstack import flavors
+from nova.api.openstack import wsgi
from nova.tests.api.openstack import fakes
-import nova.wsgi
FLAGS = flags.FLAGS
response_body = "Try to say this Mr. Knox, sir..."
-class StubController(nova.wsgi.Controller):
+class StubController(object):
def __init__(self, body):
self.body = body
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 9f1f28611..be777df9b 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -22,7 +22,6 @@ and as a WSGI layer
import copy
import json
-import datetime
import os
import shutil
import tempfile
@@ -128,7 +127,7 @@ class _BaseImageServiceTests(test.TestCase):
@staticmethod
def _make_fixture(name):
- fixture = {'name': 'test image',
+ fixture = {'name': name,
'updated': None,
'created': None,
'status': None,
@@ -227,6 +226,127 @@ class GlanceImageServiceTest(_BaseImageServiceTests):
expected = {'name': 'test image', 'properties': {}}
self.assertDictMatch(self.sent_to_glance['metadata'], expected)
+ def test_index_default_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context)
+ i = 0
+ for meta in image_metas:
+ expected = {'id': 'DONTCARE',
+ 'name': 'TestImage %d' % (i)}
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_index_marker(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context, marker=ids[1])
+ self.assertEquals(len(image_metas), 8)
+ i = 2
+ for meta in image_metas:
+ expected = {'id': 'DONTCARE',
+ 'name': 'TestImage %d' % (i)}
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_index_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context, limit=3)
+ self.assertEquals(len(image_metas), 3)
+
+ def test_index_marker_and_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.index(self.context, marker=ids[3], limit=1)
+ self.assertEquals(len(image_metas), 1)
+ i = 4
+ for meta in image_metas:
+ expected = {'id': 'DONTCARE',
+ 'name': 'TestImage %d' % (i)}
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_detail_marker(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.detail(self.context, marker=ids[1])
+ self.assertEquals(len(image_metas), 8)
+ i = 2
+ for meta in image_metas:
+ expected = {
+ 'id': 'DONTCARE',
+ 'status': None,
+ 'is_public': True,
+ 'name': 'TestImage %d' % (i),
+ 'properties': {
+ 'updated': None,
+ 'created': None,
+ },
+ }
+
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
+ def test_detail_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.detail(self.context, limit=3)
+ self.assertEquals(len(image_metas), 3)
+
+ def test_detail_marker_and_limit(self):
+ fixtures = []
+ ids = []
+ for i in range(10):
+ fixture = self._make_fixture('TestImage %d' % (i))
+ fixtures.append(fixture)
+ ids.append(self.service.create(self.context, fixture)['id'])
+
+ image_metas = self.service.detail(self.context, marker=ids[3], limit=3)
+ self.assertEquals(len(image_metas), 3)
+ i = 4
+ for meta in image_metas:
+ expected = {
+ 'id': 'DONTCARE',
+ 'status': None,
+ 'is_public': True,
+ 'name': 'TestImage %d' % (i),
+ 'properties': {
+ 'updated': None, 'created': None},
+ }
+ self.assertDictMatch(meta, expected)
+ i = i + 1
+
class ImageControllerWithGlanceServiceTest(test.TestCase):
"""
@@ -249,6 +369,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
self.fixtures = self._make_image_fixtures()
fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
def tearDown(self):
"""Run after each test."""
@@ -714,7 +835,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'name': 'testname'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?name=testname')
@@ -728,7 +850,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE')
@@ -742,7 +865,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'property-test': '3'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?property-test=3')
@@ -756,7 +880,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -770,7 +895,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {}
- image_service.index(context, filters).AndReturn([])
+ image_service.index(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images')
@@ -784,7 +910,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'name': 'testname'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?name=testname')
@@ -798,7 +925,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE')
@@ -812,7 +940,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'property-test': '3'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?property-test=3')
@@ -826,7 +955,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {'status': 'ACTIVE'}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -840,7 +970,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_service = mocker.CreateMockAnything()
context = object()
filters = {}
- image_service.detail(context, filters).AndReturn([])
+ image_service.detail(
+ context, filters=filters, marker=0, limit=0).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail')
@@ -871,6 +1002,79 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
+ def test_create_image(self):
+
+ body = dict(image=dict(serverId='123', name='Backup 1'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+
+ def test_create_image_no_server_id(self):
+
+ body = dict(image=dict(name='Backup 1'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_image_v1_1(self):
+
+ body = dict(image=dict(serverRef='123', name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+
+ def test_create_image_v1_1_xml_serialization(self):
+
+ body = dict(image=dict(serverRef='123', name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ req.headers["accept"] = "application/xml"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+ resp_xml = minidom.parseString(response.body.replace(" ", ""))
+ expected_href = "http://localhost/v1.1/images/123"
+ expected_image = minidom.parseString("""
+ <image
+ created="None"
+ id="123"
+ name="None"
+ serverRef="http://localhost/v1.1/servers/123"
+ status="ACTIVE"
+ updated="None"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="self"/>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/xml" />
+ </links>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected_image.toxml(), resp_xml.toxml())
+
+ def test_create_image_v1_1_no_server_ref(self):
+
+ body = dict(image=dict(name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
@classmethod
def _make_image_fixtures(cls):
image_id = 123
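The new marker/limit tests for the Glance-backed image service imply a simple paging rule: skip everything up to and including the marker id, then return at most `limit` entries. A rough illustration of that rule over an in-memory list (a hypothetical helper, not the fake service's actual code):

    def paginate(images, marker=None, limit=None):
        """Return the images after `marker` (matched by id), capped at
        `limit` entries; a limit of 0 or None means no cap."""
        start = 0
        if marker is not None:
            ids = [image['id'] for image in images]
            # ValueError here stands in for an unknown-marker error.
            start = ids.index(marker) + 1
        if limit:
            return images[start:start + limit]
        return images[start:]

With ten fixtures, paginate(images, marker=ids[1]) yields the eight entries starting at index 2, and marker=ids[3] with limit=1 yields only the entry at index 4, which is what test_index_marker and test_index_marker_and_limit assert.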
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 70f59eda6..01613d1d8 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -73,7 +73,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
def setUp(self):
"""Run before each test."""
BaseLimitTestSuite.setUp(self)
- self.controller = limits.LimitsControllerV10()
+ self.controller = limits.create_resource('1.0')
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
@@ -209,7 +209,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite):
def setUp(self):
"""Run before each test."""
BaseLimitTestSuite.setUp(self)
- self.controller = limits.LimitsControllerV11()
+ self.controller = limits.create_resource('1.1')
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
index c4d1d4fd8..b583d40fe 100644
--- a/nova/tests/api/openstack/test_server_metadata.py
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -89,6 +89,7 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
def test_index_no_data(self):
@@ -99,6 +100,7 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual(0, len(res_dict['metadata']))
def test_show(self):
@@ -109,6 +111,7 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value5', res_dict['key5'])
def test_show_meta_not_found(self):
@@ -140,8 +143,19 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
def test_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@@ -152,9 +166,20 @@ class ServerMetaDataTest(unittest.TestCase):
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
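The two new empty-body tests above (POST and PUT with no body returning 400) suggest the controller guards its entry points with a simple body check. A sketch of that guard (hypothetical helper name, not necessarily how the metadata controller spells it):

    import webob.exc


    def require_body(request):
        """Raise 400 when a create/update request arrives without a body."""
        if not request.body:
            raise webob.exc.HTTPBadRequest('Request body is empty')
        return request.body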
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index fbde5c9ce..8357df594 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -16,7 +16,6 @@
# under the License.
import base64
-import datetime
import json
import unittest
from xml.dom import minidom
@@ -29,14 +28,18 @@ from nova import db
from nova import exception
from nova import flags
from nova import test
+from nova import utils
import nova.api.openstack
from nova.api.openstack import servers
+from nova.api.openstack import create_instance_helper
import nova.compute.api
from nova.compute import instance_types
from nova.compute import power_state
import nova.db.api
+import nova.scheduler.api
from nova.db.sqlalchemy.models import Instance
from nova.db.sqlalchemy.models import InstanceMetadata
+import nova.image.fake
import nova.rpc
from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
@@ -67,6 +70,34 @@ def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
+def return_servers_by_reservation(context, reservation_id=""):
+ return [stub_instance(i, reservation_id) for i in xrange(5)]
+
+
+def return_servers_by_reservation_empty(context, reservation_id=""):
+ return []
+
+
+def return_servers_from_child_zones_empty(*args, **kwargs):
+ return []
+
+
+def return_servers_from_child_zones(*args, **kwargs):
+ class Server(object):
+ pass
+
+ zones = []
+ for zone in xrange(3):
+ servers = []
+ for server_id in xrange(5):
+ server = Server()
+ server._info = stub_instance(server_id, reservation_id="child")
+ servers.append(server)
+
+ zones.append(("Zone%d" % zone, servers))
+ return zones
+
+
def return_security_group(context, instance_id, security_group_id):
pass
@@ -80,7 +111,7 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
- host=None, power_state=0):
+ host=None, power_state=0, reservation_id=""):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -92,12 +123,17 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
if host is not None:
host = str(host)
+ # ReservationID isn't sent back, so hack it in there.
+ server_name = "server%s" % id
+ if reservation_id != "":
+ server_name = "reservation_%s" % (reservation_id, )
+
instance = {
"id": id,
"admin_pass": "",
"user_id": user_id,
"project_id": "",
- "image_id": "10",
+ "image_ref": "10",
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
@@ -112,13 +148,13 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"host": host,
"instance_type": dict(inst_type),
"user_data": "",
- "reservation_id": "",
+ "reservation_id": reservation_id,
"mac_address": "",
- "scheduled_at": datetime.datetime.now(),
- "launched_at": datetime.datetime.now(),
- "terminated_at": datetime.datetime.now(),
+ "scheduled_at": utils.utcnow(),
+ "launched_at": utils.utcnow(),
+ "terminated_at": utils.utcnow(),
"availability_zone": "",
- "display_name": "server%s" % id,
+ "display_name": server_name,
"display_description": "",
"locked": False,
"metadata": metadata}
@@ -217,7 +253,6 @@ class ServersTest(test.TestCase):
},
]
- print res_dict['server']
self.assertEqual(res_dict['server']['links'], expected_links)
def test_get_server_by_id_with_addresses_xml(self):
@@ -364,6 +399,57 @@ class ServersTest(test.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
+ def test_get_server_list_with_reservation_id(self):
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation',
+ return_servers_by_reservation)
+ self.stubs.Set(nova.scheduler.api, 'call_zone_method',
+ return_servers_from_child_zones)
+ req = webob.Request.blank('/v1.0/servers?reservation_id=foo')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ i = 0
+ for s in res_dict['servers']:
+ if '_is_precooked' in s:
+ self.assertEqual(s.get('reservation_id'), 'child')
+ else:
+ self.assertEqual(s.get('name'), 'server%d' % i)
+ i += 1
+
+ def test_get_server_list_with_reservation_id_empty(self):
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation',
+ return_servers_by_reservation_empty)
+ self.stubs.Set(nova.scheduler.api, 'call_zone_method',
+ return_servers_from_child_zones_empty)
+ req = webob.Request.blank('/v1.0/servers/detail?reservation_id=foo')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ i = 0
+ for s in res_dict['servers']:
+ if '_is_precooked' in s:
+ self.assertEqual(s.get('reservation_id'), 'child')
+ else:
+ self.assertEqual(s.get('name'), 'server%d' % i)
+ i += 1
+
+ def test_get_server_list_with_reservation_id_details(self):
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation',
+ return_servers_by_reservation)
+ self.stubs.Set(nova.scheduler.api, 'call_zone_method',
+ return_servers_from_child_zones)
+ req = webob.Request.blank('/v1.0/servers/detail?reservation_id=foo')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ i = 0
+ for s in res_dict['servers']:
+ if '_is_precooked' in s:
+ self.assertEqual(s.get('reservation_id'), 'child')
+ else:
+ self.assertEqual(s.get('name'), 'server%d' % i)
+ i += 1
+
def test_get_server_list_v1_1(self):
req = webob.Request.blank('/v1.1/servers')
res = req.get_response(fakes.wsgi_app())
@@ -483,10 +569,9 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for)
self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip',
fake_method)
- self.stubs.Set(nova.api.openstack.servers.Controller,
+ self.stubs.Set(
+ nova.api.openstack.create_instance_helper.CreateInstanceHelper,
"_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
- self.stubs.Set(nova.api.openstack.common,
- "get_image_id_from_image_hash", image_id_from_hash)
self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
def _test_create_instance_helper(self):
@@ -514,6 +599,48 @@ class ServersTest(test.TestCase):
def test_create_instance(self):
self._test_create_instance_helper()
+ def test_create_instance_via_zones(self):
+ """Server generated ReservationID"""
+ self._setup_for_create_instance()
+ FLAGS.allow_admin_api = True
+
+ body = dict(server=dict(
+ name='server_test', imageId=3, flavorId=2,
+ metadata={'hello': 'world', 'open': 'stack'},
+ personality={}))
+ req = webob.Request.blank('/v1.0/zones/boot')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+
+ reservation_id = json.loads(res.body)['reservation_id']
+ self.assertEqual(res.status_int, 200)
+ self.assertNotEqual(reservation_id, "")
+ self.assertNotEqual(reservation_id, None)
+ self.assertTrue(len(reservation_id) > 1)
+
+ def test_create_instance_via_zones_with_resid(self):
+ """User supplied ReservationID"""
+ self._setup_for_create_instance()
+ FLAGS.allow_admin_api = True
+
+ body = dict(server=dict(
+ name='server_test', imageId=3, flavorId=2,
+ metadata={'hello': 'world', 'open': 'stack'},
+ personality={}, reservation_id='myresid'))
+ req = webob.Request.blank('/v1.0/zones/boot')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+
+ reservation_id = json.loads(res.body)['reservation_id']
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(reservation_id, "myresid")
+
def test_create_instance_no_key_pair(self):
fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
self._test_create_instance_helper()
@@ -589,12 +716,12 @@ class ServersTest(test.TestCase):
def test_create_instance_v1_1(self):
self._setup_for_create_instance()
- image_ref = 'http://localhost/v1.1/images/2'
+ image_href = 'http://localhost/v1.1/images/2'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': image_ref,
+ 'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
@@ -616,16 +743,16 @@ class ServersTest(test.TestCase):
self.assertEqual('server_test', server['name'])
self.assertEqual(1, server['id'])
self.assertEqual(flavor_ref, server['flavorRef'])
- self.assertEqual(image_ref, server['imageRef'])
+ self.assertEqual(image_href, server['imageRef'])
self.assertEqual(res.status_int, 200)
def test_create_instance_v1_1_bad_href(self):
self._setup_for_create_instance()
- image_ref = 'http://localhost/v1.1/images/asdf'
+ image_href = 'http://localhost/v1.1/images/asdf'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = dict(server=dict(
- name='server_test', imageRef=image_ref, flavorRef=flavor_ref,
+ name='server_test', imageRef=image_href, flavorRef=flavor_ref,
metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.1/servers')
@@ -638,13 +765,12 @@ class ServersTest(test.TestCase):
def test_create_instance_v1_1_local_href(self):
self._setup_for_create_instance()
- image_ref = 'http://localhost/v1.1/images/2'
- image_ref_local = '2'
+ image_id = 2
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': image_ref_local,
+ 'imageRef': image_id,
'flavorRef': flavor_ref,
},
}
@@ -659,7 +785,7 @@ class ServersTest(test.TestCase):
server = json.loads(res.body)['server']
self.assertEqual(1, server['id'])
self.assertEqual(flavor_ref, server['flavorRef'])
- self.assertEqual(image_ref, server['imageRef'])
+ self.assertEqual(image_id, server['imageRef'])
self.assertEqual(res.status_int, 200)
def test_create_instance_with_admin_pass_v1_0(self):
@@ -686,12 +812,12 @@ class ServersTest(test.TestCase):
def test_create_instance_with_admin_pass_v1_1(self):
self._setup_for_create_instance()
- image_ref = 'http://localhost/v1.1/images/2'
+ image_href = 'http://localhost/v1.1/images/2'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': image_ref,
+ 'imageRef': image_href,
'flavorRef': flavor_ref,
'adminPass': 'testpass',
},
@@ -708,12 +834,12 @@ class ServersTest(test.TestCase):
def test_create_instance_with_empty_admin_pass_v1_1(self):
self._setup_for_create_instance()
- image_ref = 'http://localhost/v1.1/images/2'
+ image_href = 'http://localhost/v1.1/images/2'
flavor_ref = 'http://localhost/v1.1/flavors/3'
body = {
'server': {
'name': 'server_test',
- 'imageRef': image_ref,
+ 'imageRef': image_href,
'flavorRef': flavor_ref,
'adminPass': '',
},
@@ -773,9 +899,7 @@ class ServersTest(test.TestCase):
self.body = json.dumps(dict(server=inst_dict))
def server_update(context, id, params):
- filtered_dict = dict(
- display_name='server_test'
- )
+ filtered_dict = dict(display_name='server_test')
self.assertEqual(params, filtered_dict)
return filtered_dict
@@ -844,7 +968,6 @@ class ServersTest(test.TestCase):
req = webob.Request.blank('/v1.0/servers/detail')
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
- print res.body
dom = minidom.parseString(res.body)
for i, server in enumerate(dom.getElementsByTagName('server')):
self.assertEqual(server.getAttribute('id'), str(i))
@@ -865,7 +988,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s['id'], i)
self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % i)
- self.assertEqual(s['imageId'], '10')
+ self.assertEqual(s['imageId'], 10)
self.assertEqual(s['flavorId'], 1)
self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], str(i))
@@ -879,7 +1002,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s['id'], i)
self.assertEqual(s['hostId'], '')
self.assertEqual(s['name'], 'server%d' % i)
- self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10')
+ self.assertEqual(s['imageRef'], 10)
self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1')
self.assertEqual(s['status'], 'BUILD')
self.assertEqual(s['metadata']['seq'], str(i))
@@ -911,7 +1034,7 @@ class ServersTest(test.TestCase):
self.assertEqual(s['id'], i)
self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % i)
- self.assertEqual(s['imageId'], '10')
+ self.assertEqual(s['imageId'], 10)
self.assertEqual(s['flavorId'], 1)
def test_server_pause(self):
@@ -1008,6 +1131,14 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 501)
+ def test_server_change_password_xml(self):
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/xml'
+ req.body = '<changePassword adminPass="1234pass"/>'
+# res = req.get_response(fakes.wsgi_app())
+# self.assertEqual(res.status_int, 501)
+
def test_server_change_password_v1_1(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
@@ -1267,6 +1398,25 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 202)
self.assertEqual(self.resize_called, True)
+ def test_resize_server_v11(self):
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.content_type = 'application/json'
+ req.method = 'POST'
+ body_dict = dict(resize=dict(flavorRef="http://localhost/3"))
+ req.body = json.dumps(body_dict)
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
def test_resize_bad_flavor_fails(self):
req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3)))
@@ -1380,13 +1530,13 @@ class ServersTest(test.TestCase):
class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
def setUp(self):
- self.deserializer = servers.ServerCreateRequestXMLDeserializer()
+ self.deserializer = create_instance_helper.ServerXMLDeserializer()
def test_minimal_request(self):
serial_request = """
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1"/>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1400,7 +1550,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
name="new-server-test" imageId="1" flavorId="1">
<metadata/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1415,7 +1565,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
name="new-server-test" imageId="1" flavorId="1">
<personality/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1431,7 +1581,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<metadata/>
<personality/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1448,7 +1598,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<personality/>
<metadata/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1466,7 +1616,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<file path="/etc/conf">aabbccdd</file>
</personality>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1476,7 +1626,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
name="new-server-test" imageId="1" flavorId="1">
<personality><file path="/etc/conf">aabbccdd</file>
<file path="/etc/sudoers">abcd</file></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"},
{"path": "/etc/sudoers", "contents": "abcd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1492,7 +1642,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<file path="/etc/ignoreme">anything</file>
</personality>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1501,7 +1651,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1">
<personality><file>aabbccdd</file></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1510,7 +1660,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1">
<personality><file path="/etc/conf"></file></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": ""}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1519,7 +1669,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1">
<personality><file path="/etc/conf"/></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": ""}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1531,7 +1681,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="alpha">beta</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "beta"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1544,7 +1694,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="foo">bar</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "beta", "foo": "bar"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1556,7 +1706,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="alpha"></meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": ""}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1569,7 +1719,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="delta"/>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "", "delta": ""}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1581,7 +1731,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta>beta</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"": "beta"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1594,7 +1744,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta>gamma</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"": "gamma"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1607,7 +1757,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="foo">baz</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"foo": "baz"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1654,17 +1804,17 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
},
],
}}
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
self.assertEqual(request, expected)
- def test_request_xmlser_with_flavor_image_ref(self):
+ def test_request_xmlser_with_flavor_image_href(self):
serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"
imageRef="http://localhost:8774/v1.1/images/1"
flavorRef="http://localhost:8774/v1.1/flavors/1">
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
self.assertEquals(request["server"]["flavorRef"],
"http://localhost:8774/v1.1/flavors/1")
self.assertEquals(request["server"]["imageRef"],
@@ -1679,6 +1829,7 @@ class TestServerInstanceCreation(test.TestCase):
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_image_service(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
self.allow_admin = FLAGS.allow_admin_api
@@ -1711,10 +1862,9 @@ class TestServerInstanceCreation(test.TestCase):
compute_api = MockComputeAPI()
self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
- self.stubs.Set(nova.api.openstack.servers.Controller,
+ self.stubs.Set(
+ nova.api.openstack.create_instance_helper.CreateInstanceHelper,
'_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
- self.stubs.Set(nova.api.openstack.common,
- 'get_image_id_from_image_hash', make_stub_method(2))
return compute_api
def _create_personality_request_dict(self, personality_files):
@@ -1969,6 +2119,6 @@ class TestGetKernelRamdiskFromImage(test.TestCase):
@staticmethod
def _get_k_r(image_meta):
"""Rebinding function to a shorter name for convenience"""
- kernel_id, ramdisk_id = \
- servers.Controller._do_get_kernel_ramdisk_from_image(image_meta)
+ kernel_id, ramdisk_id = create_instance_helper.CreateInstanceHelper. \
+ _do_get_kernel_ramdisk_from_image(image_meta)
return kernel_id, ramdisk_id
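The deserializer calls above now pass an action name — deserialize(serial_request, 'create') — and the DictSerializer/TextDeserializer tests in the new test_wsgi.py below pin down the dispatch rule: call the method named after the action, falling back to default() when the action is missing or None. A minimal sketch of that rule (the real classes also do the JSON/XML parsing, of course):

    class ActionDispatcher(object):
        """Dispatch serialize/deserialize calls by action name."""

        def dispatch(self, data, action='default'):
            # Unknown or None actions fall back to the default handler.
            method = getattr(self, action or 'default', self.default)
            return method(data)

        def default(self, data):
            raise NotImplementedError()


    class StubDeserializer(ActionDispatcher):
        def create(self, data):
            return 'parsed create body'

        def default(self, data):
            return 'parsed generic body'

Calling StubDeserializer().dispatch('<server/>', 'create') returns 'parsed create body', while passing 'update' or None falls through to the default handler, mirroring TextDeserializerTest.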
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
new file mode 100644
index 000000000..2fa50ac9b
--- /dev/null
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -0,0 +1,305 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import json
+import webob
+
+from nova import exception
+from nova import test
+from nova.api.openstack import wsgi
+
+
+class RequestTest(test.TestCase):
+ def test_content_type_missing(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.body = "<body />"
+ self.assertRaises(exception.InvalidContentType,
+ request.get_content_type)
+
+ def test_content_type_unsupported(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "text/html"
+ request.body = "asdf<br />"
+ self.assertRaises(exception.InvalidContentType,
+ request.get_content_type)
+
+ def test_content_type_with_charset(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "application/json; charset=UTF-8"
+ result = request.get_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_from_accept_xml(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml, application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = \
+ "application/json; q=0.3, application/xml; q=0.9"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_from_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123.invalid')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_accept_and_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_accept_default(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+
+class DictSerializerTest(test.TestCase):
+ def test_dispatch(self):
+ serializer = wsgi.DictSerializer()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.serialize({}, 'create'), 'pants')
+
+ def test_dispatch_default(self):
+ serializer = wsgi.DictSerializer()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.serialize({}, 'update'), 'trousers')
+
+ def test_dispatch_action_None(self):
+ serializer = wsgi.DictSerializer()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.serialize({}, None), 'trousers')
+
+
+class XMLDictSerializerTest(test.TestCase):
+ def test_xml(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
+ expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
+ serializer = wsgi.XMLDictSerializer(xmlns="asdf")
+ result = serializer.serialize(input_dict)
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_xml)
+
+
+class JSONDictSerializerTest(test.TestCase):
+ def test_json(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
+ expected_json = '{"servers":{"a":[2,3]}}'
+ serializer = wsgi.JSONDictSerializer()
+ result = serializer.serialize(input_dict)
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_json)
+
+
+class TextDeserializerTest(test.TestCase):
+ def test_dispatch(self):
+ deserializer = wsgi.TextDeserializer()
+ deserializer.create = lambda x: 'pants'
+ deserializer.default = lambda x: 'trousers'
+ self.assertEqual(deserializer.deserialize({}, 'create'), 'pants')
+
+ def test_dispatch_default(self):
+ deserializer = wsgi.TextDeserializer()
+ deserializer.create = lambda x: 'pants'
+ deserializer.default = lambda x: 'trousers'
+ self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers')
+
+ def test_dispatch_action_None(self):
+ deserializer = wsgi.TextDeserializer()
+ deserializer.create = lambda x: 'pants'
+ deserializer.default = lambda x: 'trousers'
+ self.assertEqual(deserializer.deserialize({}, None), 'trousers')
+
+
+class JSONDeserializerTest(test.TestCase):
+ def test_json(self):
+ data = """{"a": {
+ "a1": "1",
+ "a2": "2",
+ "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+ "d": {"e": "1"},
+ "f": "1"}}"""
+ as_dict = dict(a={
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+ 'd': {'e': '1'},
+ 'f': '1'})
+ deserializer = wsgi.JSONDeserializer()
+ self.assertEqual(deserializer.deserialize(data), as_dict)
+
+
+class XMLDeserializerTest(test.TestCase):
+ def test_xml(self):
+ xml = """
+ <a a1="1" a2="2">
+ <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
+ <d><e>1</e></d>
+ <f>1</f>
+ </a>
+ """.strip()
+ as_dict = dict(a={
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+ 'd': {'e': '1'},
+ 'f': '1'})
+ metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
+ deserializer = wsgi.XMLDeserializer(metadata=metadata)
+ self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+ def test_xml_empty(self):
+ xml = """<a></a>"""
+ as_dict = {"a": {}}
+ deserializer = wsgi.XMLDeserializer()
+ self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+
+class ResponseSerializerTest(test.TestCase):
+ def setUp(self):
+ class JSONSerializer(object):
+ def serialize(self, data, action='default'):
+ return 'pew_json'
+
+ class XMLSerializer(object):
+ def serialize(self, data, action='default'):
+ return 'pew_xml'
+
+ self.serializers = {
+ 'application/json': JSONSerializer(),
+ 'application/XML': XMLSerializer(),
+ }
+
+ self.serializer = wsgi.ResponseSerializer(serializers=self.serializers)
+
+ def tearDown(self):
+ pass
+
+ def test_get_serializer(self):
+ self.assertEqual(self.serializer.get_serializer('application/json'),
+ self.serializers['application/json'])
+
+ def test_get_serializer_unknown_content_type(self):
+ self.assertRaises(exception.InvalidContentType,
+ self.serializer.get_serializer,
+ 'application/unknown')
+
+ def test_serialize_response(self):
+ response = self.serializer.serialize({}, 'application/json')
+ self.assertEqual(response.headers['Content-Type'], 'application/json')
+ self.assertEqual(response.body, 'pew_json')
+
+ def test_serialize_response_dict_to_unknown_content_type(self):
+ self.assertRaises(exception.InvalidContentType,
+ self.serializer.serialize,
+ {}, 'application/unknown')
+
+
+class RequestDeserializerTest(test.TestCase):
+ def setUp(self):
+ class JSONDeserializer(object):
+ def deserialize(self, data, action='default'):
+ return 'pew_json'
+
+ class XMLDeserializer(object):
+ def deserialize(self, data, action='default'):
+ return 'pew_xml'
+
+ self.deserializers = {
+ 'application/json': JSONDeserializer(),
+ 'application/XML': XMLDeserializer(),
+ }
+
+ self.deserializer = wsgi.RequestDeserializer(
+ deserializers=self.deserializers)
+
+ def tearDown(self):
+ pass
+
+ def test_get_deserializer(self):
+ expected = self.deserializer.get_deserializer('application/json')
+ self.assertEqual(expected, self.deserializers['application/json'])
+
+ def test_get_deserializer_unknown_content_type(self):
+ self.assertRaises(exception.InvalidContentType,
+ self.deserializer.get_deserializer,
+ 'application/unknown')
+
+ def test_get_expected_content_type(self):
+ request = wsgi.Request.blank('/')
+ request.headers['Accept'] = 'application/json'
+ self.assertEqual(self.deserializer.get_expected_content_type(request),
+ 'application/json')
+
+ def test_get_action_args(self):
+ env = {
+ 'wsgiorg.routing_args': [None, {
+ 'controller': None,
+ 'format': None,
+ 'action': 'update',
+ 'id': 12,
+ }],
+ }
+
+ expected = {'action': 'update', 'id': 12}
+
+ self.assertEqual(self.deserializer.get_action_args(env), expected)
+
+ def test_deserialize(self):
+ def fake_get_routing_args(request):
+ return {'action': 'create'}
+ self.deserializer.get_action_args = fake_get_routing_args
+
+ request = wsgi.Request.blank('/')
+ request.headers['Accept'] = 'application/xml'
+
+ deserialized = self.deserializer.deserialize(request)
+ expected = ('create', {}, 'application/xml')
+
+ self.assertEqual(expected, deserialized)
+
+
+class ResourceTest(test.TestCase):
+ def test_dispatch(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ resource = wsgi.Resource(Controller())
+ actual = resource.dispatch(None, 'index', {'pants': 'off'})
+ expected = 'off'
+ self.assertEqual(actual, expected)
+
+ def test_dispatch_unknown_controller_action(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ resource = wsgi.Resource(Controller())
+ self.assertRaises(AttributeError, resource.dispatch,
+ None, 'create', {})
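ResourceTest above fixes the dispatch behaviour of the new wsgi.Resource: look the action up as a method on the wrapped controller and call it with the request plus the routing args, letting an unknown action surface as AttributeError. A stripped-down sketch of just that part (the real Resource also drives the request deserializer and response serializer):

    class Resource(object):
        """Dispatch-only sketch of nova.api.openstack.wsgi.Resource."""

        def __init__(self, controller):
            self.controller = controller

        def dispatch(self, request, action, action_args):
            # getattr raises AttributeError for unknown actions, which is
            # what test_dispatch_unknown_controller_action expects.
            method = getattr(self.controller, action)
            return method(request, **action_args)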
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index fa2e05033..098577e4c 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -21,7 +21,6 @@ import json
import nova.db
from nova import context
from nova import crypto
-from nova import exception
from nova import flags
from nova import test
from nova.api.openstack import zones
@@ -210,6 +209,11 @@ class ZonesTest(test.TestCase):
self.stubs.Set(api, 'select', zone_select)
req = webob.Request.blank('/v1.0/zones/select')
+ req.method = 'POST'
+ req.headers["Content-Type"] = "application/json"
+ # Select queries end up being JSON-encoded twice:
+ # once to a string and again as the HTTP POST body.
+ req.body = json.dumps(json.dumps({}))
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
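The comment added above notes that zone select queries are JSON-encoded twice, so the body the test posts has to be decoded twice on the receiving end. A tiny illustration of that round trip:

    import json

    query = {}
    body = json.dumps(json.dumps(query))    # dict -> JSON string -> POST body
    decoded = json.loads(json.loads(body))  # undo both layers
    assert decoded == query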
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 5820ecdc2..d33268296 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -67,192 +67,3 @@ class Test(test.TestCase):
self.assertEqual(result.body, "Router result")
result = webob.Request.blank('/bad').get_response(Router())
self.assertNotEqual(result.body, "Router result")
-
-
-class ControllerTest(test.TestCase):
-
- class TestRouter(wsgi.Router):
-
- class TestController(wsgi.Controller):
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "test": ["id"]}}}
-
- def show(self, req, id): # pylint: disable=W0622,C0103
- return {"test": {"id": id}}
-
- def __init__(self):
- mapper = routes.Mapper()
- mapper.resource("test", "tests", controller=self.TestController())
- wsgi.Router.__init__(self, mapper)
-
- def test_show(self):
- request = wsgi.Request.blank('/tests/123')
- result = request.get_response(self.TestRouter())
- self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
-
- def test_response_content_type_from_accept_xml(self):
- request = webob.Request.blank('/tests/123')
- request.headers["Accept"] = "application/xml"
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/xml")
-
- def test_response_content_type_from_accept_json(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/json"
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/json")
-
- def test_response_content_type_from_query_extension_xml(self):
- request = wsgi.Request.blank('/tests/123.xml')
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/xml")
-
- def test_response_content_type_from_query_extension_json(self):
- request = wsgi.Request.blank('/tests/123.json')
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/json")
-
- def test_response_content_type_default_when_unsupported(self):
- request = wsgi.Request.blank('/tests/123.unsupported')
- request.headers["Accept"] = "application/unsupported1"
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.status_int, 200)
- self.assertEqual(result.headers["Content-Type"], "application/json")
-
-
-class RequestTest(test.TestCase):
-
- def test_request_content_type_missing(self):
- request = wsgi.Request.blank('/tests/123')
- request.body = "<body />"
- self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
-
- def test_request_content_type_unsupported(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Content-Type"] = "text/html"
- request.body = "asdf<br />"
- self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
-
- def test_request_content_type_with_charset(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Content-Type"] = "application/json; charset=UTF-8"
- result = request.get_content_type()
- self.assertEqual(result, "application/json")
-
- def test_content_type_from_accept_xml(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/xml"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/xml, application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = \
- "application/json; q=0.3, application/xml; q=0.9"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- def test_content_type_from_query_extension(self):
- request = wsgi.Request.blank('/tests/123.xml')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- request = wsgi.Request.blank('/tests/123.json')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123.invalid')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- def test_content_type_accept_and_query_extension(self):
- request = wsgi.Request.blank('/tests/123.xml')
- request.headers["Accept"] = "application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- def test_content_type_accept_default(self):
- request = wsgi.Request.blank('/tests/123.unsupported')
- request.headers["Accept"] = "application/unsupported1"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
-
-class SerializerTest(test.TestCase):
-
- def test_xml(self):
- input_dict = dict(servers=dict(a=(2, 3)))
- expected_xml = '<servers><a>(2,3)</a></servers>'
- serializer = wsgi.Serializer()
- result = serializer.serialize(input_dict, "application/xml")
- result = result.replace('\n', '').replace(' ', '')
- self.assertEqual(result, expected_xml)
-
- def test_json(self):
- input_dict = dict(servers=dict(a=(2, 3)))
- expected_json = '{"servers":{"a":[2,3]}}'
- serializer = wsgi.Serializer()
- result = serializer.serialize(input_dict, "application/json")
- result = result.replace('\n', '').replace(' ', '')
- self.assertEqual(result, expected_json)
-
- def test_unsupported_content_type(self):
- serializer = wsgi.Serializer()
- self.assertRaises(exception.InvalidContentType, serializer.serialize,
- {}, "text/null")
-
- def test_deserialize_json(self):
- data = """{"a": {
- "a1": "1",
- "a2": "2",
- "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
- "d": {"e": "1"},
- "f": "1"}}"""
- as_dict = dict(a={
- 'a1': '1',
- 'a2': '2',
- 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
- 'd': {'e': '1'},
- 'f': '1'})
- metadata = {}
- serializer = wsgi.Serializer(metadata)
- self.assertEqual(serializer.deserialize(data, "application/json"),
- as_dict)
-
- def test_deserialize_xml(self):
- xml = """
- <a a1="1" a2="2">
- <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
- <d><e>1</e></d>
- <f>1</f>
- </a>
- """.strip()
- as_dict = dict(a={
- 'a1': '1',
- 'a2': '2',
- 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
- 'd': {'e': '1'},
- 'f': '1'})
- metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
- serializer = wsgi.Serializer(metadata)
- self.assertEqual(serializer.deserialize(xml, "application/xml"),
- as_dict)
-
- def test_deserialize_empty_xml(self):
- xml = """<a></a>"""
- as_dict = {"a": {}}
- serializer = wsgi.Serializer()
- self.assertEqual(serializer.deserialize(xml, "application/xml"),
- as_dict)
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index 5872552ec..1e0b90d82 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -16,13 +16,14 @@
import StringIO
-import glance.client
+import nova.image
-def stubout_glance_client(stubs, cls):
- """Stubs out glance.client.Client"""
- stubs.Set(glance.client, 'Client',
- lambda *args, **kwargs: cls(*args, **kwargs))
+def stubout_glance_client(stubs):
+ def fake_get_glance_client(image_href):
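+ # The fake treats the last path segment of the image href as the integer image id.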
+ image_id = int(str(image_href).split('/')[-1])
+ return (FakeGlance('foo'), image_id)
+ stubs.Set(nova.image, 'get_glance_client', fake_get_glance_client)
class FakeGlance(object):
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 6d108d494..223e7ae57 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -34,7 +34,7 @@ class StubGlanceClient(object):
def get_image_meta(self, image_id):
return self.images[image_id]
- def get_images_detailed(self, filters=None):
+ def get_images_detailed(self, filters=None, marker=None, limit=None):
return self.images.itervalues()
def get_image(self, image_id):
@@ -60,10 +60,8 @@ class BaseGlanceTest(unittest.TestCase):
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22)
def setUp(self):
- # FIXME(sirp): we can probably use stubs library here rather than
- # dependency injection
self.client = StubGlanceClient(None)
- self.service = glance.GlanceImageService(self.client)
+ self.service = glance.GlanceImageService(client=self.client)
self.context = context.RequestContext(None, None)
def assertDateTimesFilled(self, image_meta):
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 7f590441e..522c7cb0e 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -27,6 +27,7 @@ from nova import flags
from nova import service
from nova import test # For the flags
from nova.auth import manager
+import nova.image.glance
from nova.log import logging
from nova.tests.integrated.api import client
@@ -151,6 +152,11 @@ class _IntegratedTestBase(test.TestCase):
f = self._get_flags()
self.flags(**f)
+ def fake_get_image_service(image_href):
+ image_id = int(str(image_href).split('/')[-1])
+ return (nova.image.fake.FakeImageService(), image_id)
+ self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
+
# set up services
self.start_service('compute')
self.start_service('volume')
@@ -199,19 +205,13 @@ class _IntegratedTestBase(test.TestCase):
LOG.debug("Image: %s" % image)
if 'imageRef' in image:
- image_ref = image['imageRef']
+ image_href = image['imageRef']
else:
- # NOTE(justinsb): The imageRef code hasn't yet landed
- LOG.warning("imageRef not yet in images output")
- image_ref = image['id']
-
- # TODO(justinsb): This is FUBAR
- image_ref = abs(hash(image_ref))
-
- image_ref = 'http://fake.server/%s' % image_ref
+ image_href = image['id']
+ image_href = 'http://fake.server/%s' % image_href
# We now have a valid imageId
- server['imageRef'] = image_ref
+ server['imageRef'] = image_href
# Set a valid flavorId
flavor = self.api.get_flavors()[0]
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index a67fa1bb5..fcb517cf5 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -194,7 +194,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
post = {}
post['rebuild'] = {
"imageRef": "https://localhost/v1.1/32278/images/2",
- "name": "blah"
+ "name": "blah",
}
self.api.post_server_action(created_server_id, post)
@@ -224,7 +224,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
post = {}
post['rebuild'] = {
"imageRef": "https://localhost/v1.1/32278/images/2",
- "name": "blah"
+ "name": "blah",
}
metadata = {}
@@ -267,7 +267,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
post = {}
post['rebuild'] = {
"imageRef": "https://localhost/v1.1/32278/images/2",
- "name": "blah"
+ "name": "blah",
}
metadata = {}
diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py
index 8a9754777..fde32f797 100644
--- a/nova/tests/integrated/test_xml.py
+++ b/nova/tests/integrated/test_xml.py
@@ -32,7 +32,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
""""Some basic XML sanity checks."""
def test_namespace_limits(self):
- """/limits should have v1.0 namespace (hasn't changed in 1.1)."""
+ """/limits should have v1.1 namespace (has changed in 1.1)."""
headers = {}
headers['Accept'] = 'application/xml'
@@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
data = response.read()
LOG.debug("data: %s" % data)
- prefix = '<limits xmlns="%s"' % common.XML_NS_V10
+ prefix = '<limits xmlns="%s"' % common.XML_NS_V11
self.assertTrue(data.startswith(prefix))
def test_namespace_servers(self):
diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/tests/scheduler/__init__.py
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
new file mode 100644
index 000000000..10eafde08
--- /dev/null
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -0,0 +1,206 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+import json
+
+from nova import exception
+from nova import flags
+from nova import test
+from nova.scheduler import host_filter
+
+FLAGS = flags.FLAGS
+
+
+class FakeZoneManager:
+ pass
+
+
+class HostFilterTestCase(test.TestCase):
+ """Test case for host filters."""
+
+ def _host_caps(self, multiplier):
+ # Returns host capabilities in the following way:
+ # host1 = memory:free 10 (100max)
+ # disk:available 100 (1000max)
+ # hostN = memory:free 10 + 10N
+ # disk:available 100 + 100N
+ # in other words: hostN has more resources than host0
+ # which means ... don't go above 10 hosts.
+ return {'host_name-description': 'XenServer %s' % multiplier,
+ 'host_hostname': 'xs-%s' % multiplier,
+ 'host_memory_total': 100,
+ 'host_memory_overhead': 10,
+ 'host_memory_free': 10 + multiplier * 10,
+ 'host_memory_free-computed': 10 + multiplier * 10,
+ 'host_other-config': {},
+ 'host_ip_address': '192.168.1.%d' % (100 + multiplier),
+ 'host_cpu_info': {},
+ 'disk_available': 100 + multiplier * 100,
+ 'disk_total': 1000,
+ 'disk_used': 0,
+ 'host_uuid': 'xxx-%d' % multiplier,
+ 'host_name-label': 'xs-%s' % multiplier}
+
+ def setUp(self):
+ self.old_flag = FLAGS.default_host_filter
+ FLAGS.default_host_filter = \
+ 'nova.scheduler.host_filter.AllHostsFilter'
+ self.instance_type = dict(name='tiny',
+ memory_mb=50,
+ vcpus=10,
+ local_gb=500,
+ flavorid=1,
+ swap=500,
+ rxtx_quota=30000,
+ rxtx_cap=200)
+
+ self.zone_manager = FakeZoneManager()
+ states = {}
+ for x in xrange(10):
+ states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
+ self.zone_manager.service_states = states
+
+ def tearDown(self):
+ FLAGS.default_host_filter = self.old_flag
+
+ def test_choose_filter(self):
+ # Test default filter ...
+ hf = host_filter.choose_host_filter()
+ self.assertEquals(hf._full_name(),
+ 'nova.scheduler.host_filter.AllHostsFilter')
+ # Test valid filter ...
+ hf = host_filter.choose_host_filter(
+ 'nova.scheduler.host_filter.InstanceTypeFilter')
+ self.assertEquals(hf._full_name(),
+ 'nova.scheduler.host_filter.InstanceTypeFilter')
+ # Test invalid filter ...
+ try:
+ host_filter.choose_host_filter('does not exist')
+ self.fail("Should not find host filter.")
+ except exception.SchedulerHostFilterNotFound:
+ pass
+
+ def test_all_host_filter(self):
+ hf = host_filter.AllHostsFilter()
+ cooked = hf.instance_type_to_filter(self.instance_type)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(10, len(hosts))
+ for host, capabilities in hosts:
+ self.assertTrue(host.startswith('host'))
+
+ def test_instance_type_filter(self):
+ hf = host_filter.InstanceTypeFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = hf.instance_type_to_filter(self.instance_type)
+ self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+ name)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
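+ # host05 is the first host with 50 memory free and 500 disk available,
+ # so host05 through host10 satisfy the instance type.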
+ self.assertEquals(6, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ self.assertEquals('host05', just_hosts[0])
+ self.assertEquals('host10', just_hosts[5])
+
+ def test_json_filter(self):
+ hf = host_filter.JsonFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = hf.instance_type_to_filter(self.instance_type)
+ self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(6, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ self.assertEquals('host05', just_hosts[0])
+ self.assertEquals('host10', just_hosts[5])
+
+ # Try some custom queries
+
+ raw = ['or',
+ ['and',
+ ['<', '$compute.host_memory_free', 30],
+ ['<', '$compute.disk_available', 300],
+ ],
+ ['and',
+ ['>', '$compute.host_memory_free', 70],
+ ['>', '$compute.disk_available', 700],
+ ]
+ ]
+ cooked = json.dumps(raw)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
+
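+ # host01 and host02 match the low-memory/low-disk branch; host08 through
+ # host10 match the high-memory/high-disk branch, giving five hosts in total.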
+ self.assertEquals(5, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([1, 2, 8, 9, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ raw = ['not',
+ ['=', '$compute.host_memory_free', 30],
+ ]
+ cooked = json.dumps(raw)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
+
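+ # host03 is the only host with exactly 30 memory free, so it alone is excluded.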
+ self.assertEquals(9, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
+ cooked = json.dumps(raw)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
+
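+ # Free memory runs 10, 20, ..., 100 across the hosts, so only the
+ # even-numbered hosts land on the listed values.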
+ self.assertEquals(5, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([2, 4, 6, 8, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ # Try some bogus input ...
+ raw = ['unknown command', ]
+ cooked = json.dumps(raw)
+ try:
+ hf.filter_hosts(self.zone_manager, cooked)
+ self.fail("Should give KeyError")
+ except KeyError, e:
+ pass
+
+ self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
+ self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
+ self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
+ ['not', True, False, True, False],
+ )))
+
+ try:
+ hf.filter_hosts(self.zone_manager, json.dumps(
+ 'not', True, False, True, False,
+ ))
+ self.fail("Should give KeyError")
+ except KeyError, e:
+ pass
+
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(['=', '$foo', 100])))
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(['=', '$.....', 100])))
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(
+ ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
+
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(['=', {}, ['>', '$missing....foo']])))
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
new file mode 100644
index 000000000..9a5318aee
--- /dev/null
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -0,0 +1,144 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Least Cost Scheduler
+"""
+
+from nova import flags
+from nova import test
+from nova.scheduler import least_cost
+from nova.tests.scheduler import test_zone_aware_scheduler
+
+MB = 1024 * 1024
+FLAGS = flags.FLAGS
+
+
+class FakeHost(object):
+ def __init__(self, host_id, free_ram, io):
+ self.id = host_id
+ self.free_ram = free_ram
+ self.io = io
+
+
+class WeightedSumTestCase(test.TestCase):
+ def test_empty_domain(self):
+ domain = []
+ weighted_fns = []
+ result = least_cost.weighted_sum(domain, weighted_fns)
+ expected = []
+ self.assertEqual(expected, result)
+
+ def test_basic_costing(self):
+ hosts = [
+ FakeHost(1, 512 * MB, 100),
+ FakeHost(2, 256 * MB, 400),
+ FakeHost(3, 512 * MB, 100),
+ ]
+
+ weighted_fns = [
+ (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost*
+ (2, lambda h: h.io), # Avoid high I/O
+ ]
+
+ costs = least_cost.weighted_sum(
+ domain=hosts, weighted_fns=weighted_fns)
+
+ # Each 256 MB unit of free-ram contributes 0.5 points by way of:
+ # cost = weight * (score/max_score) = 1 * (256/512) = 0.5
+ # Each 100 iops of IO adds 0.5 points by way of:
+ # cost = 2 * (100/400) = 2 * 0.25 = 0.5
+ expected = [1.5, 2.5, 1.5]
+ self.assertEqual(expected, costs)
+
+
+class LeastCostSchedulerTestCase(test.TestCase):
+ def setUp(self):
+ super(LeastCostSchedulerTestCase, self).setUp()
+
+ class FakeZoneManager:
+ pass
+
+ zone_manager = FakeZoneManager()
+
+ states = test_zone_aware_scheduler.fake_zone_manager_service_states(
+ num_hosts=10)
+ zone_manager.service_states = states
+
+ self.sched = least_cost.LeastCostScheduler()
+ self.sched.zone_manager = zone_manager
+
+ def tearDown(self):
+ super(LeastCostSchedulerTestCase, self).tearDown()
+
+ def assertWeights(self, expected, num, request_spec, hosts):
+ weighted = self.sched.weigh_hosts(num, request_spec, hosts)
+ self.assertDictListMatch(weighted, expected, approx_equal=True)
+
+ def test_no_hosts(self):
+ num = 1
+ request_spec = {}
+ hosts = []
+
+ expected = []
+ self.assertWeights(expected, num, request_spec, hosts)
+
+ def test_noop_cost_fn(self):
+ FLAGS.least_cost_scheduler_cost_functions = [
+ 'nova.scheduler.least_cost.noop_cost_fn',
+ ]
+ FLAGS.noop_cost_fn_weight = 1
+
+ num = 1
+ request_spec = {}
+ hosts = self.sched.filter_hosts(num, request_spec)
+
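+ # The noop cost function costs every host equally, so each host's weight
+ # is simply the configured noop_cost_fn_weight.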
+ expected = [dict(weight=1, hostname=hostname)
+ for hostname, caps in hosts]
+ self.assertWeights(expected, num, request_spec, hosts)
+
+ def test_cost_fn_weights(self):
+ FLAGS.least_cost_scheduler_cost_functions = [
+ 'nova.scheduler.least_cost.noop_cost_fn',
+ ]
+ FLAGS.noop_cost_fn_weight = 2
+
+ num = 1
+ request_spec = {}
+ hosts = self.sched.filter_hosts(num, request_spec)
+
+ expected = [dict(weight=2, hostname=hostname)
+ for hostname, caps in hosts]
+ self.assertWeights(expected, num, request_spec, hosts)
+
+ def test_fill_first_cost_fn(self):
+ FLAGS.least_cost_scheduler_cost_functions = [
+ 'nova.scheduler.least_cost.fill_first_cost_fn',
+ ]
+ FLAGS.fill_first_cost_fn_weight = 1
+
+ num = 1
+ request_spec = {}
+ hosts = self.sched.filter_hosts(num, request_spec)
+
+ expected = []
+ for idx, (hostname, caps) in enumerate(hosts):
+ # Costs are normalized so over 10 hosts, each host with increasing
+ # free ram will cost 1/N more. Since the lowest cost host has some
+ # free ram, we add in the 1/N for the base_cost
+ weight = 0.1 + (0.1 * idx)
+ weight_dict = dict(weight=weight, hostname=hostname)
+ expected.append(weight_dict)
+
+ self.assertWeights(expected, num, request_spec, hosts)
diff --git a/nova/tests/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 54b3f80fb..0d7929996 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -61,7 +61,8 @@ class SchedulerTestCase(test.TestCase):
"""Test case for scheduler"""
def setUp(self):
super(SchedulerTestCase, self).setUp()
- self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
+ driver = 'nova.tests.scheduler.test_scheduler.TestDriver'
+ self.flags(scheduler_driver=driver)
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
@@ -196,7 +197,7 @@ class ZoneSchedulerTestCase(test.TestCase):
service.topic = 'compute'
service.id = kwargs['id']
service.availability_zone = kwargs['zone']
- service.created_at = datetime.datetime.utcnow()
+ service.created_at = utils.utcnow()
return service
def test_with_two_zones(self):
@@ -290,7 +291,7 @@ class SimpleDriverTestCase(test.TestCase):
dic['host'] = kwargs.get('host', 'dummy')
s_ref = db.service_create(self.context, dic)
if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
- t = datetime.datetime.utcnow() - datetime.timedelta(0)
+ t = utils.utcnow() - datetime.timedelta(0)
dic['created_at'] = kwargs.get('created_at', t)
dic['updated_at'] = kwargs.get('updated_at', t)
db.service_update(self.context, s_ref['id'], dic)
@@ -401,7 +402,7 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
@@ -542,7 +543,7 @@ class SimpleDriverTestCase(test.TestCase):
def test_wont_sechedule_if_specified_host_is_down(self):
compute1 = self.start_service('compute', host='host1')
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
@@ -692,7 +693,7 @@ class SimpleDriverTestCase(test.TestCase):
dic = {'instance_id': instance_id, 'size': 1}
v_ref = db.volume_create(self.context, {'instance_id': instance_id,
'size': 1})
- t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
+ t1 = utils.utcnow() - datetime.timedelta(1)
dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
'topic': 'volume', 'report_count': 0}
s_ref = db.service_create(self.context, dic)
@@ -709,7 +710,7 @@ class SimpleDriverTestCase(test.TestCase):
"""Confirms src-compute node is alive."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
- t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ t = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
@@ -737,7 +738,7 @@ class SimpleDriverTestCase(test.TestCase):
"""Confirms exception raises in case dest host does not exist."""
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
- t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ t = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
@@ -796,7 +797,7 @@ class SimpleDriverTestCase(test.TestCase):
# mocks for live_migration_common_check()
instance_id = self._create_instance()
i_ref = db.instance_get(self.context, instance_id)
- t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+ t1 = utils.utcnow() - datetime.timedelta(10)
s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
host=dest)
@@ -1109,10 +1110,4 @@ class CallZoneMethodTest(test.TestCase):
def test_call_zone_method_generates_exception(self):
context = {}
method = 'raises_exception'
- results = api.call_zone_method(context, method)
-
- # FIXME(sirp): for now the _error_trap code is catching errors and
- # converting them to a ("ERROR", "string") tuples. The code (and this
- # test) should eventually handle real exceptions.
- expected = [(1, ('ERROR', 'testing'))]
- self.assertEqual(expected, results)
+ self.assertRaises(Exception, api.call_zone_method, context, method)
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
new file mode 100644
index 000000000..37c6488cc
--- /dev/null
+++ b/nova/tests/scheduler/test_zone_aware_scheduler.py
@@ -0,0 +1,296 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Zone Aware Scheduler.
+"""
+
+from nova import exception
+from nova import test
+from nova.scheduler import driver
+from nova.scheduler import zone_aware_scheduler
+from nova.scheduler import zone_manager
+
+
+def _host_caps(multiplier):
+ # Returns host capabilities in the following way:
+ # host1 = memory:free 10 (100max)
+ # disk:available 100 (1000max)
+ # hostN = memory:free 10 + 10N
+ # disk:available 100 + 100N
+ # in other words: hostN has more resources than host0
+ # which means ... don't go above 10 hosts.
+ return {'host_name-description': 'XenServer %s' % multiplier,
+ 'host_hostname': 'xs-%s' % multiplier,
+ 'host_memory_total': 100,
+ 'host_memory_overhead': 10,
+ 'host_memory_free': 10 + multiplier * 10,
+ 'host_memory_free-computed': 10 + multiplier * 10,
+ 'host_other-config': {},
+ 'host_ip_address': '192.168.1.%d' % (100 + multiplier),
+ 'host_cpu_info': {},
+ 'disk_available': 100 + multiplier * 100,
+ 'disk_total': 1000,
+ 'disk_used': 0,
+ 'host_uuid': 'xxx-%d' % multiplier,
+ 'host_name-label': 'xs-%s' % multiplier}
+
+
+def fake_zone_manager_service_states(num_hosts):
+ states = {}
+ for x in xrange(num_hosts):
+ states['host%02d' % (x + 1)] = {'compute': _host_caps(x)}
+ return states
+
+
+class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+ def filter_hosts(self, num, specs):
+ # NOTE(sirp): this is returning [(hostname, services)]
+ return self.zone_manager.service_states.items()
+
+ def weigh_hosts(self, num, specs, hosts):
+ fake_weight = 99
+ weighted = []
+ for hostname, caps in hosts:
+ weighted.append(dict(weight=fake_weight, name=hostname))
+ return weighted
+
+
+class FakeZoneManager(zone_manager.ZoneManager):
+ def __init__(self):
+ self.service_states = {
+ 'host1': {
+ 'compute': {'ram': 1000},
+ },
+ 'host2': {
+ 'compute': {'ram': 2000},
+ },
+ 'host3': {
+ 'compute': {'ram': 3000},
+ },
+ }
+
+
+class FakeEmptyZoneManager(zone_manager.ZoneManager):
+ def __init__(self):
+ self.service_states = {}
+
+
+def fake_empty_call_zone_method(context, method, specs):
+ return []
+
+
+# Hmm, I should probably be using mox for this.
+was_called = False
+
+
+def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
+ global was_called
+ was_called = True
+
+
+def fake_ask_child_zone_to_create_instance(context, zone_info,
+ request_spec, kwargs):
+ global was_called
+ was_called = True
+
+
+def fake_provision_resource_locally(context, item, instance_id, kwargs):
+ global was_called
+ was_called = True
+
+
+def fake_provision_resource_from_blob(context, item, instance_id,
+ request_spec, kwargs):
+ global was_called
+ was_called = True
+
+
+def fake_decrypt_blob_returns_local_info(blob):
+ return {'foo': True} # values aren't important.
+
+
+def fake_decrypt_blob_returns_child_info(blob):
+ return {'child_zone': True,
+ 'child_blob': True} # values aren't important. Keys are.
+
+
+def fake_call_zone_method(context, method, specs):
+ return [
+ ('zone1', [
+ dict(weight=1, blob='AAAAAAA'),
+ dict(weight=111, blob='BBBBBBB'),
+ dict(weight=112, blob='CCCCCCC'),
+ dict(weight=113, blob='DDDDDDD'),
+ ]),
+ ('zone2', [
+ dict(weight=120, blob='EEEEEEE'),
+ dict(weight=2, blob='FFFFFFF'),
+ dict(weight=122, blob='GGGGGGG'),
+ dict(weight=123, blob='HHHHHHH'),
+ ]),
+ ('zone3', [
+ dict(weight=130, blob='IIIIIII'),
+ dict(weight=131, blob='JJJJJJJ'),
+ dict(weight=132, blob='KKKKKKK'),
+ dict(weight=3, blob='LLLLLLL'),
+ ]),
+ ]
+
+
+class ZoneAwareSchedulerTestCase(test.TestCase):
+ """Test case for Zone Aware Scheduler."""
+
+ def test_zone_aware_scheduler(self):
+ """
+ Create a nested set of FakeZones and ensure that a select call
+ returns the appropriate build plan.
+ """
+ sched = FakeZoneAwareScheduler()
+ self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+
+ zm = FakeZoneManager()
+ sched.set_zone_manager(zm)
+
+ fake_context = {}
+ build_plan = sched.select(fake_context, {})
+
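+ # 3 hosts from the local FakeZoneManager plus 4 weighted entries from
+ # each of the 3 child zones gives a 15 item build plan.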
+ self.assertEqual(15, len(build_plan))
+
+ hostnames = [plan_item['name']
+ for plan_item in build_plan if 'name' in plan_item]
+ self.assertEqual(3, len(hostnames))
+
+ def test_empty_zone_aware_scheduler(self):
+ """
+ Ensure empty hosts & child_zones result in a NoValidHost exception.
+ """
+ sched = FakeZoneAwareScheduler()
+ self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
+
+ zm = FakeEmptyZoneManager()
+ sched.set_zone_manager(zm)
+
+ fake_context = {}
+ self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+ fake_context, 1,
+ dict(host_filter=None,
+ request_spec={'instance_type': {}}))
+
+ def test_schedule_do_not_schedule_with_hint(self):
+ """
+ Check the local/child zone routing in the run_instance() call.
+ If the zone_blob hint was passed in, don't re-schedule.
+ """
+ global was_called
+ sched = FakeZoneAwareScheduler()
+ was_called = False
+ self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
+ request_spec = {
+ 'instance_properties': {},
+ 'instance_type': {},
+ 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
+ 'blob': "Non-None blob data",
+ }
+
+ result = sched.schedule_run_instance(None, 1, request_spec)
+ self.assertEquals(None, result)
+ self.assertTrue(was_called)
+
+ def test_provision_resource_local(self):
+ """Provision a resource locally or remotely."""
+ global was_called
+ sched = FakeZoneAwareScheduler()
+ was_called = False
+ self.stubs.Set(sched, '_provision_resource_locally',
+ fake_provision_resource_locally)
+
+ request_spec = {'hostname': "foo"}
+ sched._provision_resource(None, request_spec, 1, request_spec, {})
+ self.assertTrue(was_called)
+
+ def test_provision_resource_remote(self):
+ """Provision a resource locally or remotely."""
+ global was_called
+ sched = FakeZoneAwareScheduler()
+ was_called = False
+ self.stubs.Set(sched, '_provision_resource_from_blob',
+ fake_provision_resource_from_blob)
+
+ request_spec = {}
+ sched._provision_resource(None, request_spec, 1, request_spec, {})
+ self.assertTrue(was_called)
+
+ def test_provision_resource_from_blob_empty(self):
+ """Provision a resource locally or remotely given no hints."""
+ global was_called
+ sched = FakeZoneAwareScheduler()
+ request_spec = {}
+ self.assertRaises(zone_aware_scheduler.InvalidBlob,
+ sched._provision_resource_from_blob,
+ None, {}, 1, {}, {})
+
+ def test_provision_resource_from_blob_with_local_blob(self):
+ """
+ Provision a resource locally or remotely when blob hint passed in.
+ """
+ global was_called
+ sched = FakeZoneAwareScheduler()
+ was_called = False
+ self.stubs.Set(sched, '_decrypt_blob',
+ fake_decrypt_blob_returns_local_info)
+ self.stubs.Set(sched, '_provision_resource_locally',
+ fake_provision_resource_locally)
+
+ request_spec = {'blob': "Non-None blob data"}
+
+ sched._provision_resource_from_blob(None, request_spec, 1,
+ request_spec, {})
+ self.assertTrue(was_called)
+
+ def test_provision_resource_from_blob_with_child_blob(self):
+ """
+ Provision a resource locally or remotely when child blob hint
+ passed in.
+ """
+ global was_called
+ sched = FakeZoneAwareScheduler()
+ self.stubs.Set(sched, '_decrypt_blob',
+ fake_decrypt_blob_returns_child_info)
+ was_called = False
+ self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
+ fake_ask_child_zone_to_create_instance)
+
+ request_spec = {'blob': "Non-None blob data"}
+
+ sched._provision_resource_from_blob(None, request_spec, 1,
+ request_spec, {})
+ self.assertTrue(was_called)
+
+ def test_provision_resource_from_blob_with_immediate_child_blob(self):
+ """
+ Provision a resource locally or remotely when blob hint passed in
+ from an immediate child.
+ """
+ global was_called
+ sched = FakeZoneAwareScheduler()
+ was_called = False
+ self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
+ fake_ask_child_zone_to_create_instance)
+
+ request_spec = {'child_blob': True, 'child_zone': True}
+
+ sched._provision_resource_from_blob(None, request_spec, 1,
+ request_spec, {})
+ self.assertTrue(was_called)
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index f02dd94b7..7d00bddfe 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase):
super(_AuthManagerBaseTestCase, self).setUp()
self.flags(connection_type='fake')
self.manager = manager.AuthManager(new=True)
+ self.manager.mc.cache = {}
def test_create_and_find_user(self):
with user_generator(self.manager):
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 55ea6be02..d2ff14f27 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -26,17 +26,16 @@ from eventlet import greenthread
from nova import context
from nova import crypto
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
-from nova import exception
from nova.auth import manager
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import local
-from nova.exception import NotFound
FLAGS = flags.FLAGS
@@ -68,7 +67,7 @@ class CloudTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
- 'type': 'machine'}}
+ 'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(local.LocalImageService, 'show', fake_show)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
@@ -116,6 +115,18 @@ class CloudTestCase(test.TestCase):
public_ip=address)
db.floating_ip_destroy(self.context, address)
+ def test_allocate_address(self):
+ address = "10.10.10.10"
+ allocate = self.cloud.allocate_address
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'host': self.network.host})
+ self.assertEqual(allocate(self.context)['publicIp'], address)
+ db.floating_ip_destroy(self.context, address)
+ self.assertRaises(exception.NoMoreFloatingIps,
+ allocate,
+ self.context)
+
def test_associate_disassociate_address(self):
"""Verifies associate runs cleanly without raising an exception"""
address = "10.10.10.10"
@@ -254,10 +265,10 @@ class CloudTestCase(test.TestCase):
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
- 'image_id': 1,
+ 'image_ref': 1,
'host': 'host1'})
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
- 'image_id': 1,
+ 'image_ref': 1,
'host': 'host2'})
comp1 = db.service_create(self.context, {'host': 'host1',
'availability_zone': 'zone1',
@@ -290,7 +301,7 @@ class CloudTestCase(test.TestCase):
'type': 'machine'}}]
def fake_show_none(meh, context, id):
- raise NotFound
+ raise exception.ImageNotFound(image_id='bad_image_id')
self.stubs.Set(local.LocalImageService, 'detail', fake_detail)
# list all
@@ -308,7 +319,7 @@ class CloudTestCase(test.TestCase):
self.stubs.UnsetAll()
self.stubs.Set(local.LocalImageService, 'show', fake_show_none)
self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none)
- self.assertRaises(NotFound, describe_images,
+ self.assertRaises(exception.ImageNotFound, describe_images,
self.context, ['ami-fake'])
def test_describe_image_attribute(self):
@@ -445,9 +456,73 @@ class CloudTestCase(test.TestCase):
self._create_key('test')
self.cloud.delete_key_pair(self.context, 'test')
+ def test_run_instances(self):
+ # stub out the rpc call
+ def stub_cast(*args, **kwargs):
+ pass
+
+ self.stubs.Set(rpc, 'cast', stub_cast)
+
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['imageId'], 'ami-00000001')
+ self.assertEqual(instance['displayName'], 'Server 1')
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+ self.assertEqual(instance['instanceState']['name'], 'scheduling')
+ self.assertEqual(instance['instanceType'], 'm1.small')
+
+ def test_run_instances_image_state_none(self):
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_no_state(self, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine'}}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state)
+ self.assertRaises(exception.ApiError, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_state_invalid(self):
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_decrypt(self, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine', 'image_state': 'decrypting'}}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt)
+ self.assertRaises(exception.ApiError, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_status_active(self):
+ kwargs = {'image_id': FLAGS.default_image,
+ 'instance_type': FLAGS.default_instance_type,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_stat_active(self, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine'}, 'status': 'active'}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active)
+
+ result = run_instances(self.context, **kwargs)
+ self.assertEqual(len(result['instancesSet']), 1)
+
def test_terminate_instances(self):
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
- 'image_id': 1,
+ 'image_ref': 1,
'host': 'host1'})
terminate_instances = self.cloud.terminate_instances
# valid instance_id
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 9170837b6..b4ac2dbc4 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -19,7 +19,6 @@
Tests For Compute
"""
-import datetime
import mox
import stubout
@@ -84,7 +83,7 @@ class ComputeTestCase(test.TestCase):
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 1
+ inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
@@ -150,7 +149,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
- image_id=None,
+ image_href=None,
security_group=['testgroup'])
try:
self.assertEqual(len(db.security_group_get_by_instance(
@@ -168,7 +167,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
- image_id=None,
+ image_href=None,
security_group=['testgroup'])
try:
db.instance_destroy(self.context, ref[0]['id'])
@@ -184,7 +183,7 @@ class ComputeTestCase(test.TestCase):
ref = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
- image_id=None,
+ image_href=None,
security_group=['testgroup'])
try:
@@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase):
instance_ref = db.instance_get(self.context, instance_id)
self.assertEqual(instance_ref['launched_at'], None)
self.assertEqual(instance_ref['deleted_at'], None)
- launch = datetime.datetime.utcnow()
+ launch = utils.utcnow()
self.compute.run_instance(self.context, instance_id)
instance_ref = db.instance_get(self.context, instance_id)
self.assert_(instance_ref['launched_at'] > launch)
self.assertEqual(instance_ref['deleted_at'], None)
- terminate = datetime.datetime.utcnow()
+ terminate = utils.utcnow()
self.compute.terminate_instance(self.context, instance_id)
self.context = self.context.elevated(True)
instance_ref = db.instance_get(self.context, instance_id)
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
index 1a9a867ee..831e7670f 100644
--- a/nova/tests/test_console.py
+++ b/nova/tests/test_console.py
@@ -20,8 +20,6 @@
Tests For Console proxy.
"""
-import datetime
-
from nova import context
from nova import db
from nova import exception
diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py
index 945d78794..6c25b396e 100644
--- a/nova/tests/test_crypto.py
+++ b/nova/tests/test_crypto.py
@@ -16,7 +16,11 @@
Tests for Crypto module.
"""
+import mox
+import stubout
+
from nova import crypto
+from nova import db
from nova import test
@@ -46,3 +50,82 @@ class SymmetricKeyTestCase(test.TestCase):
plain = decrypt(cipher_text)
self.assertEquals(plain_text, plain)
+
+
+class RevokeCertsTest(test.TestCase):
+
+ def setUp(self):
+ super(RevokeCertsTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(RevokeCertsTest, self).tearDown()
+
+ def test_revoke_certs_by_user_and_project(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_user_and_project(context,
+ user_id,
+ project_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_user_and_project',
+ mock_certificate_get_all_by_user_and_project)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, file_name)
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_user_and_project(user_id, project_id)
+
+ self.mox.VerifyAll()
+
+ def test_revoke_certs_by_user(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_user(context, user_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_user',
+ mock_certificate_get_all_by_user)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_user(user_id)
+
+ self.mox.VerifyAll()
+
+ def test_revoke_certs_by_project(self):
+ user_id = 'test_user'
+ project_id = 2
+ file_name = 'test_file'
+
+ def mock_certificate_get_all_by_project(context, project_id):
+
+ return [{"user_id": user_id, "project_id": project_id,
+ "file_name": file_name}]
+
+ self.stubs.Set(db, 'certificate_get_all_by_project',
+ mock_certificate_get_all_by_project)
+
+ self.mox.StubOutWithMock(crypto, 'revoke_cert')
+ crypto.revoke_cert(project_id, mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ crypto.revoke_certs_by_project(project_id)
+
+ self.mox.VerifyAll()
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
index 07817cc5a..3361c7b73 100644
--- a/nova/tests/test_host_filter.py
+++ b/nova/tests/test_host_filter.py
@@ -133,13 +133,14 @@ class HostFilterTestCase(test.TestCase):
raw = ['or',
['and',
['<', '$compute.host_memory_free', 30],
- ['<', '$compute.disk_available', 300]
+ ['<', '$compute.disk_available', 300],
],
['and',
['>', '$compute.host_memory_free', 70],
- ['>', '$compute.disk_available', 700]
- ]
+ ['>', '$compute.disk_available', 700],
+ ],
]
+
cooked = json.dumps(raw)
hosts = hf.filter_hosts(self.zone_manager, cooked)
@@ -183,13 +184,11 @@ class HostFilterTestCase(test.TestCase):
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
- ['not', True, False, True, False]
- )))
+ ['not', True, False, True, False])))
try:
hf.filter_hosts(self.zone_manager, json.dumps(
- 'not', True, False, True, False
- ))
+ 'not', True, False, True, False))
self.fail("Should give KeyError")
except KeyError, e:
pass
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 1fac4e4e6..8b4183164 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import eventlet
import mox
import os
@@ -125,6 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase):
class LibvirtConnTestCase(test.TestCase):
+
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
connection._late_load_cheetah()
@@ -161,6 +163,7 @@ class LibvirtConnTestCase(test.TestCase):
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
+ 'image_ref': '123456',
'instance_type_id': '5'} # m1.small
def lazy_load_library_exists(self):
@@ -206,6 +209,29 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
connection.LibvirtConnection._conn = fake
+ def fake_lookup(self, instance_name):
+
+ class FakeVirtDomain(object):
+
+ def snapshotCreateXML(self, *args):
+ return None
+
+ def XMLDesc(self, *args):
+ return """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+
+ return FakeVirtDomain()
+
+ def fake_execute(self, *args):
+ open(args[-1], "a").close()
+
def create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'binary': 'nova-compute',
@@ -281,6 +307,81 @@ class LibvirtConnTestCase(test.TestCase):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
+ def test_snapshot(self):
+ if not self.lazy_load_library_exists():
+ return
+
+ FLAGS.image_service = 'nova.image.fake.FakeImageService'
+
+ # Start test
+ image_service = utils.import_object(FLAGS.image_service)
+
+ # Assuming that base image already exists in image_service
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; the snapshot method will update it.
+ # A single shared image_service is needed so the snapshot can find it.
+ recv_meta = image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+ connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.utils, 'execute')
+ connection.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = connection.LibvirtConnection(False)
+ conn.snapshot(instance_ref, recv_meta['id'])
+
+ snapshot = image_service.show(context, recv_meta['id'])
+ self.assertEquals(snapshot['properties']['image_state'], 'available')
+ self.assertEquals(snapshot['status'], 'active')
+ self.assertEquals(snapshot['name'], snapshot_name)
+
+ def test_snapshot_no_image_architecture(self):
+ if not self.lazy_load_library_exists():
+ return
+
+ FLAGS.image_service = 'nova.image.fake.FakeImageService'
+
+ # Start test
+ image_service = utils.import_object(FLAGS.image_service)
+
+ # Assign image_ref = 2 from nova/images/fakes to test a different
+ # base image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = "2"
+
+ # Assuming that base image already exists in image_service
+ instance_ref = db.instance_create(self.context, test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create a new image; the snapshot method will update it.
+ # A single shared image_service is needed so the snapshot can find it.
+ recv_meta = image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
+ connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(connection.utils, 'execute')
+ connection.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = connection.LibvirtConnection(False)
+ conn.snapshot(instance_ref, recv_meta['id'])
+
+ snapshot = image_service.show(context, recv_meta['id'])
+ self.assertEquals(snapshot['properties']['image_state'], 'available')
+ self.assertEquals(snapshot['status'], 'active')
+ self.assertEquals(snapshot['name'], snapshot_name)
+
def test_multi_nic(self):
instance_data = dict(self.test_instance)
network_info = _create_network_info(2)
@@ -661,6 +762,31 @@ class LibvirtConnTestCase(test.TestCase):
super(LibvirtConnTestCase, self).tearDown()
+class NWFilterFakes:
+ def __init__(self):
+ self.filters = {}
+
+ def nwfilterLookupByName(self, name):
+ if name in self.filters:
+ return self.filters[name]
+ raise libvirt.libvirtError('Filter Not Found')
+
+ def filterDefineXMLMock(self, xml):
+ class FakeNWFilterInternal:
+ def __init__(self, parent, name):
+ self.name = name
+ self.parent = parent
+
+ def undefine(self):
+ del self.parent.filters[self.name]
+ pass
+ tree = xml_to_tree(xml)
+ name = tree.get('name')
+ if name not in self.filters:
+ self.filters[name] = FakeNWFilterInternal(self, name)
+ return True
+
+
class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
@@ -678,6 +804,20 @@ class IptablesFirewallTestCase(test.TestCase):
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
+ def lazy_load_library_exists(self):
+ """check if libvirt is available."""
+ # Try to import libvirt; if that fails, skip the test.
+ try:
+ import libvirt
+ import libxml2
+ except ImportError:
+ return False
+ global libvirt
+ libvirt = __import__('libvirt')
+ connection.libvirt = __import__('libvirt')
+ connection.libxml2 = __import__('libxml2')
+ return True
+
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
@@ -883,6 +1023,40 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
+ def test_unfilter_instance_undefines_nwfilter(self):
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ admin_ctxt = context.get_admin_context()
+
+ fakefilter = NWFilterFakes()
+ self.fw.nwfilter._conn.nwfilterDefineXML =\
+ fakefilter.filterDefineXMLMock
+ self.fw.nwfilter._conn.nwfilterLookupByName =\
+ fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance_ref()
+ inst_id = instance_ref['id']
+ instance = db.instance_get(self.context, inst_id)
+
+ ip = '10.11.12.13'
+ network_ref = db.project_get_network(self.context, 'fake')
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': inst_id})
+ self.fw.setup_basic_filtering(instance)
+ self.fw.prepare_instance_filter(instance)
+ self.fw.apply_instance_filter(instance)
+ original_filter_count = len(fakefilter.filters)
+ self.fw.unfilter_instance(instance)
+
+ # should undefine just the instance filter
+ self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
+
class NWFilterTestCase(test.TestCase):
def setUp(self):
@@ -1059,3 +1233,37 @@ class NWFilterTestCase(test.TestCase):
network_info,
"fake")
self.assertEquals(len(result), 3)
+
+ def test_unfilter_instance_undefines_nwfilters(self):
+ admin_ctxt = context.get_admin_context()
+
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ inst_id = instance_ref['id']
+
+ self.security_group = self.setup_and_return_security_group()
+
+ db.instance_add_security_group(self.context, inst_id,
+ self.security_group.id)
+
+ instance = db.instance_get(self.context, inst_id)
+
+ ip = '10.11.12.13'
+ network_ref = db.project_get_network(self.context, 'fake')
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': inst_id})
+ self.fw.setup_basic_filtering(instance)
+ self.fw.prepare_instance_filter(instance)
+ self.fw.apply_instance_filter(instance)
+ original_filter_count = len(fakefilter.filters)
+ self.fw.unfilter_instance(instance)
+
+ # should undefine 2 filters: instance and instance-secgroup
+ self.assertEqual(original_filter_count - len(fakefilter.filters), 2)
+
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py
index 6564a6955..40d117c45 100644
--- a/nova/tests/test_middleware.py
+++ b/nova/tests/test_middleware.py
@@ -16,7 +16,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import datetime
import webob
import webob.dec
import webob.exc
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index cf8f4c05e..c5875a843 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -21,11 +21,24 @@ import select
from eventlet import greenpool
from eventlet import greenthread
+from nova import exception
from nova import test
from nova import utils
from nova.utils import parse_mailmap, str_dict_replace
+class ExceptionTestCase(test.TestCase):
+ @staticmethod
+ def _raise_exc(exc):
+ raise exc()
+
+ def test_exceptions_raise(self):
+ for name in dir(exception):
+ exc = getattr(exception, name)
+ if isinstance(exc, type):
+ self.assertRaises(exc, self._raise_exc, exc)
+
+
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py
index b6b0fcc68..64b799a2c 100644
--- a/nova/tests/test_notifier.py
+++ b/nova/tests/test_notifier.py
@@ -13,10 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import nova
+import stubout
+import nova
from nova import context
from nova import flags
+from nova import log
from nova import rpc
import nova.notifier.api
from nova.notifier.api import notify
@@ -24,8 +26,6 @@ from nova.notifier import no_op_notifier
from nova.notifier import rabbit_notifier
from nova import test
-import stubout
-
class NotifierTestCase(test.TestCase):
"""Test case for notifications"""
@@ -115,3 +115,22 @@ class NotifierTestCase(test.TestCase):
notify('publisher_id',
'event_type', 'DEBUG', dict(a=3))
self.assertEqual(self.test_topic, 'testnotify.debug')
+
+ def test_error_notification(self):
+ self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
+ 'nova.notifier.rabbit_notifier')
+ self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True)
+ LOG = log.getLogger('nova')
+ LOG.setup_from_flags()
+ msgs = []
+
+ def mock_cast(context, topic, data):
+ msgs.append(data)
+
+ self.stubs.Set(nova.rpc, 'cast', mock_cast)
+ LOG.error('foo')
+ self.assertEqual(1, len(msgs))
+ msg = msgs[0]
+ self.assertEqual(msg['event_type'], 'error_notification')
+ self.assertEqual(msg['priority'], 'ERROR')
+ self.assertEqual(msg['payload']['error'], 'foo')
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index ad73a3a69..0691231e4 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -223,7 +223,7 @@ class QuotaTestCase(test.TestCase):
min_count=1,
max_count=1,
instance_type=inst_type,
- image_id=1)
+ image_href=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -237,7 +237,7 @@ class QuotaTestCase(test.TestCase):
min_count=1,
max_count=1,
instance_type=inst_type,
- image_id=1)
+ image_href=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -295,7 +295,7 @@ class QuotaTestCase(test.TestCase):
min_count=1,
max_count=1,
instance_type=inst_type,
- image_id='fake',
+ image_href='fake',
metadata=metadata)
def test_default_allowed_injected_files(self):
@@ -341,16 +341,18 @@ class QuotaTestCase(test.TestCase):
self.assertEqual(limit, 23456)
def _create_with_injected_files(self, files):
+ FLAGS.image_service = 'nova.image.fake.FakeImageService'
api = compute.API(image_service=self.StubImageService())
inst_type = instance_types.get_instance_type_by_name('m1.small')
api.create(self.context, min_count=1, max_count=1,
- instance_type=inst_type, image_id='fake',
+ instance_type=inst_type, image_href='3',
injected_files=files)
def test_no_injected_files(self):
+ FLAGS.image_service = 'nova.image.fake.FakeImageService'
api = compute.API(image_service=self.StubImageService())
inst_type = instance_types.get_instance_type_by_name('m1.small')
- api.create(self.context, instance_type=inst_type, image_id='fake')
+ api.create(self.context, instance_type=inst_type, image_href='3')
def test_max_injected_files(self):
files = []
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index 22b66010a..eddf01e9f 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase):
vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- glance_stubs.stubout_glance_client(self.stubs,
- glance_stubs.FakeGlance)
+ glance_stubs.stubout_glance_client(self.stubs)
self.conn = vmwareapi_conn.get_connection(False)
def _create_instance_in_the_db(self):
@@ -64,13 +63,13 @@ class VMWareAPIVMTestCase(test.TestCase):
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
- 'image_id': "1",
+ 'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
}
- self.instance = db.instance_create(values)
+ self.instance = db.instance_create(None, values)
def _create_vm(self):
"""Create and spawn the VM."""
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 9d56c1644..d1c88287a 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase):
self.values = {'id': 1,
'project_id': 'fake',
'user_id': 'fake',
- 'image_id': 1,
+ 'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
@@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(VMOps, 'reset_network', reset_network)
stubs.stub_out_vm_methods(self.stubs)
- glance_stubs.stubout_glance_client(self.stubs,
- glance_stubs.FakeGlance)
+ glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
self.context = context.RequestContext('fake', 'fake', False)
self.conn = xenapi_conn.get_connection(False)
@@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase):
'id': id,
'project_id': proj,
'user_id': user,
- 'image_id': 1,
+ 'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
@@ -332,7 +331,7 @@ class XenAPIVMTestCase(test.TestCase):
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
- self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
+ self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
@@ -351,14 +350,14 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
- def _test_spawn(self, image_id, kernel_id, ramdisk_id,
+ def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
instance_id=1, check_injection=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id,
'project_id': self.project.id,
'user_id': self.user.id,
- 'image_id': image_id,
+ 'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
@@ -567,7 +566,7 @@ class XenAPIVMTestCase(test.TestCase):
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
- 'image_id': 1,
+ 'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
@@ -641,7 +640,7 @@ class XenAPIMigrateInstance(test.TestCase):
self.values = {'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
- 'image_id': 1,
+ 'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'local_gb': 5,
@@ -652,8 +651,7 @@ class XenAPIMigrateInstance(test.TestCase):
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
- glance_stubs.stubout_glance_client(self.stubs,
- glance_stubs.FakeGlance)
+ glance_stubs.stubout_glance_client(self.stubs)
def tearDown(self):
super(XenAPIMigrateInstance, self).tearDown()
@@ -679,8 +677,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""Unit tests for code that detects the ImageType."""
def setUp(self):
super(XenAPIDetermineDiskImageTestCase, self).setUp()
- glance_stubs.stubout_glance_client(self.stubs,
- glance_stubs.FakeGlance)
+ glance_stubs.stubout_glance_client(self.stubs)
class FakeInstance(object):
pass
@@ -697,7 +694,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
def test_instance_disk(self):
"""If a kernel is specified, the image type is DISK (aka machine)."""
FLAGS.xenapi_image_service = 'objectstore'
- self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+ self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
self.assert_disk_type(vm_utils.ImageType.DISK)
@@ -707,7 +704,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
DISK_RAW is assumed.
"""
FLAGS.xenapi_image_service = 'objectstore'
- self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -717,7 +714,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
this case will be 'raw'.
"""
FLAGS.xenapi_image_service = 'glance'
- self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -727,7 +724,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
this case will be 'vhd'.
"""
FLAGS.xenapi_image_service = 'glance'
- self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+ self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
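The XenAPIDetermineDiskImageTestCase changes above keep checking that the detected image type follows the image's disk_format for glance, and kernel presence for objectstore. A condensed decision-table sketch with stand-in constants (the format mapping is abbreviated and partly assumed):

    # Stand-in constants; the real values live in nova.virt.xenapi.vm_utils.
    DISK, DISK_RAW, DISK_VHD, KERNEL_RAMDISK = range(4)

    def determine_disk_image_type(disk_format=None, has_kernel=False):
        """Map a glance disk_format (or kernel presence) to an image type."""
        if disk_format is not None:
            return {'aki': KERNEL_RAMDISK,
                    'ari': KERNEL_RAMDISK,
                    'raw': DISK_RAW,
                    'vhd': DISK_VHD}[disk_format]
        # objectstore-style images: a separate kernel implies a partitioned disk
        return DISK if has_kernel else DISK_RAW

    assert determine_disk_image_type(has_kernel=True) == DISK
    assert determine_disk_image_type('vhd') == DISK_VHD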
diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py
deleted file mode 100644
index 37169fb97..000000000
--- a/nova/tests/test_zone_aware_scheduler.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Zone Aware Scheduler.
-"""
-
-from nova import test
-from nova.scheduler import driver
-from nova.scheduler import zone_aware_scheduler
-from nova.scheduler import zone_manager
-
-
-class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- def filter_hosts(self, num, specs):
- # NOTE(sirp): this is returning [(hostname, services)]
- return self.zone_manager.service_states.items()
-
- def weigh_hosts(self, num, specs, hosts):
- fake_weight = 99
- weighted = []
- for hostname, caps in hosts:
- weighted.append(dict(weight=fake_weight, name=hostname))
- return weighted
-
-
-class FakeZoneManager(zone_manager.ZoneManager):
- def __init__(self):
- self.service_states = {
- 'host1': {
- 'compute': {'ram': 1000}
- },
- 'host2': {
- 'compute': {'ram': 2000}
- },
- 'host3': {
- 'compute': {'ram': 3000}
- }
- }
-
-
-class FakeEmptyZoneManager(zone_manager.ZoneManager):
- def __init__(self):
- self.service_states = {}
-
-
-def fake_empty_call_zone_method(context, method, specs):
- return []
-
-
-def fake_call_zone_method(context, method, specs):
- return [
- ('zone1', [
- dict(weight=1, blob='AAAAAAA'),
- dict(weight=111, blob='BBBBBBB'),
- dict(weight=112, blob='CCCCCCC'),
- dict(weight=113, blob='DDDDDDD'),
- ]),
- ('zone2', [
- dict(weight=120, blob='EEEEEEE'),
- dict(weight=2, blob='FFFFFFF'),
- dict(weight=122, blob='GGGGGGG'),
- dict(weight=123, blob='HHHHHHH'),
- ]),
- ('zone3', [
- dict(weight=130, blob='IIIIIII'),
- dict(weight=131, blob='JJJJJJJ'),
- dict(weight=132, blob='KKKKKKK'),
- dict(weight=3, blob='LLLLLLL'),
- ]),
- ]
-
-
-class ZoneAwareSchedulerTestCase(test.TestCase):
- """Test case for Zone Aware Scheduler."""
-
- def test_zone_aware_scheduler(self):
- """
- Create a nested set of FakeZones, ensure that a select call returns the
- appropriate build plan.
- """
- sched = FakeZoneAwareScheduler()
- self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
-
- zm = FakeZoneManager()
- sched.set_zone_manager(zm)
-
- fake_context = {}
- build_plan = sched.select(fake_context, {})
-
- self.assertEqual(15, len(build_plan))
-
- hostnames = [plan_item['name']
- for plan_item in build_plan if 'name' in plan_item]
- self.assertEqual(3, len(hostnames))
-
- def test_empty_zone_aware_scheduler(self):
- """
- Ensure empty hosts & child_zones result in NoValidHosts exception.
- """
- sched = FakeZoneAwareScheduler()
- self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
-
- zm = FakeEmptyZoneManager()
- sched.set_zone_manager(zm)
-
- fake_context = {}
- self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
- fake_context, 1,
- dict(host_filter=None, instance_type={}))
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
index 0addd5573..d4eb87daf 100644
--- a/nova/tests/vmwareapi/db_fakes.py
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -52,7 +52,7 @@ def stub_out_db_instance_api(stubs):
else:
raise NotImplementedError()
- def fake_instance_create(values):
+ def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
type_data = INSTANCE_TYPES[values['instance_type']]
@@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs):
'name': values['name'],
'id': values['id'],
'reservation_id': utils.generate_uid('r'),
- 'image_id': values['image_id'],
+ 'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'state_description': 'scheduling',
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 35308d95f..151a3e909 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -42,20 +42,6 @@ def stubout_instance_snapshot(stubs):
stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
- def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
- original_parent_uuid):
- from nova.virt.xenapi.fake import create_vdi
- name_label = "instance-%s" % instance_id
- #TODO: create fake SR record
- sr_ref = "fakesr"
- vdi_ref = create_vdi(name_label=name_label, read_only=False,
- sr_ref=sr_ref, sharable=False)
- vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
- vdi_uuid = vdi_rec['uuid']
- return vdi_uuid
-
- stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)
-
def fake_parse_xmlrpc_value(val):
return val
@@ -251,10 +237,10 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def __init__(self, uri):
super(FakeSessionForMigrationTests, self).__init__(uri)
- def VDI_get_by_uuid(*args):
+ def VDI_get_by_uuid(self, *args):
return 'hurr'
- def VDI_resize_online(*args):
+ def VDI_resize_online(self, *args):
pass
def VM_start(self, _1, ref, _2, _3):
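The small fixes to VDI_get_by_uuid and VDI_resize_online above add the missing explicit self: a method declared as def m(*args) inside a class silently receives the instance as args[0], which hides argument-count mistakes. A tiny demonstration, independent of XenAPI:

    class Session(object):
        def loose(*args):
            # args[0] is actually the instance; callers can easily miscount.
            return len(args)

        def strict(self, *args):
            return len(args)

    s = Session()
    assert s.loose('a', 'b') == 3    # instance slipped in as the first element
    assert s.strict('a', 'b') == 2   # only the explicit arguments remain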
diff --git a/nova/twistd.py b/nova/twistd.py
index c07ed991f..15cf67825 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -78,7 +78,7 @@ def WrapTwistedOptions(wrapped):
self._absorbParameters()
self._absorbHandlers()
- super(TwistedOptionsToFlags, self).__init__()
+ wrapped.__init__(self)
def _absorbFlags(self):
twistd_flags = []
@@ -163,12 +163,12 @@ def WrapTwistedOptions(wrapped):
def parseArgs(self, *args):
# TODO(termie): figure out a decent way of dealing with args
#return
- super(TwistedOptionsToFlags, self).parseArgs(*args)
+ wrapped.parseArgs(self, *args)
def postOptions(self):
self._doHandlers()
- super(TwistedOptionsToFlags, self).postOptions()
+ wrapped.postOptions(self)
def __getitem__(self, key):
key = key.replace('-', '_')
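In the twistd.py hunks the generated options class now calls wrapped.__init__(self), wrapped.parseArgs(self, ...) and wrapped.postOptions(self) directly instead of going through super() with the generated class's name. A stripped-down sketch of that class-factory pattern (generic names, not twistd's):

    def wrap_options(wrapped):
        """Return a subclass of 'wrapped' that defers to it explicitly."""
        class OptionsToFlags(wrapped):
            def __init__(self):
                # Call the wrapped class directly rather than super(Name, ...),
                # since this subclass is generated inside the factory.
                wrapped.__init__(self)
                self.extra = True
        return OptionsToFlags

    class BaseOptions(object):
        def __init__(self):
            self.base = True

    opts = wrap_options(BaseOptions)()
    assert opts.base and opts.extra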
diff --git a/nova/utils.py b/nova/utils.py
index 361fc9873..691134ada 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -142,24 +142,26 @@ def execute(*cmd, **kwargs):
env = os.environ.copy()
if addl_env:
env.update(addl_env)
+ _PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
+ stdin=_PIPE,
+ stdout=_PIPE,
+ stderr=_PIPE,
env=env)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
- obj.stdin.close()
- if obj.returncode:
- LOG.debug(_('Result was %s') % obj.returncode)
+ obj.stdin.close() # pylint: disable=E1101
+ _returncode = obj.returncode # pylint: disable=E1101
+ if _returncode:
+ LOG.debug(_('Result was %s') % _returncode)
if type(check_exit_code) == types.IntType \
- and obj.returncode != check_exit_code:
+ and _returncode != check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
- exit_code=obj.returncode,
+ exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
@@ -307,7 +309,7 @@ def get_my_linklocal(interface):
def utcnow():
- """Overridable version of datetime.datetime.utcnow."""
+ """Overridable version of utils.utcnow."""
if utcnow.override_time:
return utcnow.override_time
return datetime.datetime.utcnow()
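utcnow() above is deliberately indirected so tests can freeze time. A compact, stand-alone sketch of the override pattern (the helper names below belong to this sketch, not necessarily to nova):

    import datetime

    def utcnow():
        """Overridable wrapper around datetime.datetime.utcnow()."""
        if utcnow.override_time:
            return utcnow.override_time
        return datetime.datetime.utcnow()

    utcnow.override_time = None

    def set_time_override(override_time):
        utcnow.override_time = override_time

    def clear_time_override():
        utcnow.override_time = None

    set_time_override(datetime.datetime(2011, 6, 16, 12, 0, 0))
    assert utcnow() == datetime.datetime(2011, 6, 16, 12, 0, 0)
    clear_time_override()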
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 1142e97a4..05b4775c1 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -151,7 +151,7 @@ class HyperVConnection(driver.ComputeDriver):
base_vhd_filename = os.path.join(FLAGS.instances_path,
instance.name)
vhdfile = "%s.vhd" % (base_vhd_filename)
- images.fetch(instance['image_id'], vhdfile, user, project)
+ images.fetch(instance['image_ref'], vhdfile, user, project)
try:
self._create_vm(instance)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 02c898fda..40bf6107c 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -23,6 +23,8 @@ Handling of VM disk images.
from nova import context
from nova import flags
+from nova.image import glance as glance_image_service
+import nova.image
from nova import log as logging
from nova import utils
@@ -31,23 +33,13 @@ FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.virt.images')
-def fetch(image_id, path, _user, _project):
+def fetch(image_href, path, _user, _project):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
- image_service = utils.import_object(FLAGS.image_service)
+ (image_service, image_id) = nova.image.get_image_service(image_href)
with open(path, "wb") as image_file:
elevated = context.get_admin_context()
metadata = image_service.get(elevated, image_id, image_file)
return metadata
-
-
-# TODO(vish): xenapi should use the glance client code directly instead
-# of retrieving the image using this method.
-def image_url(image):
- if FLAGS.image_service == "nova.image.glance.GlanceImageService":
- return "http://%s:%s/images/%s" % (FLAGS.glance_host,
- FLAGS.glance_port, image)
- return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
- image)
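fetch() now resolves the image service from the href itself via nova.image.get_image_service, which returns an (image_service, image_id) pair. The sketch below only approximates that idea, dispatching on whether the href embeds its own endpoint; the service classes and parsing are placeholders:

    class DefaultImageService(object):
        name = 'default'

    class GlanceImageService(object):
        def __init__(self, host, port):
            self.name = 'glance@%s:%s' % (host, port)

    def get_image_service(image_href):
        """Return (service, image_id) for a bare id or a glance-style URL."""
        href = str(image_href)
        if '/' not in href:
            return DefaultImageService(), href
        # e.g. http://glance.example.com:9292/images/42
        hostport, image_id = href.split('/images/', 1)
        host, port = hostport.rsplit('//', 1)[-1].split(':')
        return GlanceImageService(host, int(port)), image_id

    service, image_id = get_image_service('http://glance.example.com:9292/images/42')
    assert image_id == '42' and service.name == 'glance@glance.example.com:9292'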
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index f563d0681..98cdff311 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -36,6 +36,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
"""
+import hashlib
import multiprocessing
import os
import random
@@ -57,6 +58,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import utils
from nova import vnc
@@ -378,7 +380,7 @@ class LibvirtConnection(driver.ComputeDriver):
virt_dom.detachDevice(xml)
@exception.wrap_exception
- def snapshot(self, instance, image_id):
+ def snapshot(self, instance, image_href):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+, the qemu_img flag is
@@ -386,17 +388,22 @@ class LibvirtConnection(driver.ComputeDriver):
to support this command.
"""
- image_service = utils.import_object(FLAGS.image_service)
virt_dom = self._lookup_by_name(instance['name'])
elevated = context.get_admin_context()
- base = image_service.show(elevated, instance['image_id'])
+ (image_service, image_id) = nova.image.get_image_service(
+ instance['image_ref'])
+ base = image_service.show(elevated, image_id)
+ (snapshot_image_service, snapshot_image_id) = \
+ nova.image.get_image_service(image_href)
+ snapshot = snapshot_image_service.show(elevated, snapshot_image_id)
metadata = {'disk_format': base['disk_format'],
'container_format': base['container_format'],
'is_public': False,
- 'name': '%s.%s' % (base['name'], image_id),
- 'properties': {'architecture': base['architecture'],
+ 'status': 'active',
+ 'name': snapshot['name'],
+ 'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
@@ -404,6 +411,9 @@ class LibvirtConnection(driver.ComputeDriver):
'ramdisk_id': instance['ramdisk_id'],
}
}
+ if 'architecture' in base['properties']:
+ arch = base['properties']['architecture']
+ metadata['properties']['architecture'] = arch
# Make the snapshot
snapshot_name = uuid.uuid4().hex
@@ -438,7 +448,7 @@ class LibvirtConnection(driver.ComputeDriver):
# Upload that image to the image service
with open(out_path) as image_file:
image_service.update(elevated,
- image_id,
+ image_href,
metadata,
image_file)
@@ -784,7 +794,7 @@ class LibvirtConnection(driver.ComputeDriver):
project = manager.AuthManager().get_project(inst['project_id'])
if not disk_images:
- disk_images = {'image_id': inst['image_id'],
+ disk_images = {'image_id': inst['image_ref'],
'kernel_id': inst['kernel_id'],
'ramdisk_id': inst['ramdisk_id']}
@@ -805,7 +815,7 @@ class LibvirtConnection(driver.ComputeDriver):
user=user,
project=project)
- root_fname = '%08x' % int(disk_images['image_id'])
+ root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
size = FLAGS.minimum_root_size
inst_type_id = inst['instance_type_id']
@@ -880,7 +890,7 @@ class LibvirtConnection(driver.ComputeDriver):
if key or net:
inst_name = inst['name']
- img_id = inst.image_id
+ img_id = inst.image_ref
if key:
LOG.info(_('instance %(inst_name)s: injecting key into'
' image %(img_id)s') % locals())
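The root_fname change above swaps '%08x' % int(image_id) for a SHA-1 digest: once image references may be arbitrary strings, including URLs, int() no longer applies, while a hash still yields a stable, filesystem-safe cache key. For example:

    import hashlib

    def cache_name(image_ref):
        """Stable, filesystem-safe name for a cached base image."""
        return hashlib.sha1(str(image_ref).encode('utf-8')).hexdigest()

    assert cache_name('3') == cache_name('3')                      # deterministic
    assert '/' not in cache_name('http://glance:9292/images/3')    # safe on disk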
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 7e00662cd..84153fa1e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -195,7 +195,7 @@ class NWFilterFirewall(FirewallDriver):
logging.info('ensuring static filters')
self._ensure_static_filters()
- if instance['image_id'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(FLAGS.vpn_image_id):
base_filter = 'nova-vpn'
else:
base_filter = 'nova-base'
@@ -285,8 +285,29 @@ class NWFilterFirewall(FirewallDriver):
tpool.execute(self._conn.nwfilterDefineXML, xml)
def unfilter_instance(self, instance):
- # Nothing to do
- pass
+ """Clear out the nwfilter rules."""
+ network_info = netutils.get_network_info(instance)
+ instance_name = instance.name
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+
+ try:
+ self._conn.nwfilterLookupByName(instance_filter_name).\
+ undefine()
+ except libvirt.libvirtError:
+ LOG.debug(_('The nwfilter(%(instance_filter_name)s) '
+ 'for %(instance_name)s is not found.') % locals())
+
+ instance_secgroup_filter_name =\
+ '%s-secgroup' % (self._instance_filter_name(instance))
+
+ try:
+ self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\
+ .undefine()
+ except libvirt.libvirtError:
+ LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) '
+ 'for %(instance_name)s is not found.') % locals())
def prepare_instance_filter(self, instance, network_info=None):
"""
@@ -336,7 +357,7 @@ class NWFilterFirewall(FirewallDriver):
def _create_network_filters(self, instance, network_info,
instance_secgroup_filter_name):
- if instance['image_id'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(FLAGS.vpn_image_id):
base_filter = 'nova-vpn'
else:
base_filter = 'nova-base'
@@ -452,6 +473,7 @@ class IptablesFirewallDriver(FirewallDriver):
if self.instances.pop(instance['id'], None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
+ self.nwfilter.unfilter_instance(instance)
else:
LOG.info(_('Attempted to unfilter instance %s which is not '
'filtered'), instance['id'])
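unfilter_instance above now actually tears down the per-NIC and security-group filters, and treats a missing filter as a non-error so the teardown stays idempotent. A generic sketch of that tolerant-cleanup shape, with a fake connection standing in for libvirt:

    class FakeConnection(object):
        """Minimal stand-in for a connection that can look up named filters."""
        def __init__(self, defined):
            self._defined = dict((name, _Filter(self, name)) for name in defined)

        def lookup(self, name):
            if name not in self._defined:
                raise LookupError(name)
            return self._defined[name]

    class _Filter(object):
        def __init__(self, conn, name):
            self._conn, self.name = conn, name

        def undefine(self):
            del self._conn._defined[self.name]

    def unfilter_instance(conn, filter_names):
        for name in filter_names:
            try:
                conn.lookup(name).undefine()
            except LookupError:
                pass  # never defined (or already removed); nothing to clean up

    conn = FakeConnection(['nova-instance-1-aabbccddeeff'])
    unfilter_instance(conn, ['nova-instance-1-aabbccddeeff',
                             'nova-instance-1-secgroup'])
    unfilter_instance(conn, ['nova-instance-1-aabbccddeeff'])  # idempotent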
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 6d7149841..5f76b0df5 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -150,7 +150,7 @@ class VMWareVMOps(object):
"""
image_size, image_properties = \
vmware_images.get_vmdk_size_and_properties(
- instance.image_id, instance)
+ instance.image_ref, instance)
vmdk_file_size_in_kb = int(image_size) / 1024
os_type = image_properties.get("vmware_ostype", "otherGuest")
adapter_type = image_properties.get("vmware_adaptertype",
@@ -265,23 +265,23 @@ class VMWareVMOps(object):
def _fetch_image_on_esx_datastore():
"""Fetch image from Glance to ESX datastore."""
- LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
+ LOG.debug(_("Downloading image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- ({'image_id': instance.image_id,
+ ({'image_ref': instance.image_ref,
'data_store_name': data_store_name}))
# Upload the -flat.vmdk file whose meta-data file we just created
# above
vmware_images.fetch_image(
- instance.image_id,
+ instance.image_ref,
instance,
host=self._session._host_ip,
data_center_name=self._get_datacenter_name_and_ref()[1],
datastore_name=data_store_name,
cookies=cookies,
file_path=flat_uploaded_vmdk_name)
- LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
+ LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- ({'image_id': instance.image_id,
+ ({'image_ref': instance.image_ref,
'data_store_name': data_store_name}))
_fetch_image_on_esx_datastore()
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 50c6baedf..48edc5384 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -18,10 +18,9 @@
Utility functions for Image transfer.
"""
-from glance import client
-
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
@@ -117,8 +116,8 @@ def upload_image(image, instance, **kwargs):
def _get_glance_image(image, instance, **kwargs):
"""Download image from the glance image server."""
LOG.debug(_("Downloading image %s from glance image server") % image)
- glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
- metadata, read_iter = glance_client.get_image(image)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
+ metadata, read_iter = glance_client.get_image(image_id)
read_file_handle = read_write_util.GlanceFileRead(read_iter)
file_size = int(metadata['size'])
write_file_handle = read_write_util.VMWareHTTPWriteFile(
@@ -153,7 +152,7 @@ def _put_glance_image(image, instance, **kwargs):
kwargs.get("cookies"),
kwargs.get("file_path"))
file_size = read_file_handle.get_size()
- glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
# The properties and other fields that we need to set for the image.
image_metadata = {"is_public": True,
"disk_format": "vmdk",
@@ -165,7 +164,7 @@ def _put_glance_image(image, instance, **kwargs):
"vmware_image_version":
kwargs.get("image_version")}}
start_transfer(read_file_handle, file_size, glance_client=glance_client,
- image_id=image, image_meta=image_metadata)
+ image_id=image_id, image_meta=image_metadata)
LOG.debug(_("Uploaded image %s to the Glance image server") % image)
@@ -188,9 +187,8 @@ def get_vmdk_size_and_properties(image, instance):
LOG.debug(_("Getting image size for the image %s") % image)
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
- glance_client = client.Client(FLAGS.glance_host,
- FLAGS.glance_port)
- meta_data = glance_client.get_image_meta(image)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
+ meta_data = glance_client.get_image_meta(image_id)
size, properties = meta_data["size"], meta_data["properties"]
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
raise NotImplementedError
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 76988b172..113198689 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -51,13 +51,13 @@ A fake XenAPI SDK.
"""
-import datetime
import uuid
from pprint import pformat
from nova import exception
from nova import log as logging
+from nova import utils
_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',
@@ -340,10 +340,6 @@ class SessionBase(object):
return
db_ref['xenstore_data'][key] = None
- def network_get_all_records_where(self, _1, _2):
- # TODO (salvatore-orlando): filter table on _2
- return _db_content['network']
-
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if not 'xenstore_data' in db_ref:
@@ -354,7 +350,7 @@ class SessionBase(object):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
- def host_call_plugin(*args):
+ def host_call_plugin(self, *args):
return 'herp'
def network_get_all_records_where(self, _1, filter):
@@ -540,7 +536,7 @@ class SessionBase(object):
except Failure, exc:
task['error_info'] = exc.details
task['status'] = 'failed'
- task['finished'] = datetime.datetime.now()
+ task['finished'] = utils.utcnow()
return task_ref
def _check_session(self, params):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 06ee8ee9b..11da221f2 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -32,6 +32,8 @@ from xml.dom import minidom
import glance.client
from nova import exception
from nova import flags
+import nova.image
+from nova.image import glance as glance_image_service
from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
@@ -155,7 +157,6 @@ class VMHelper(HelperBase):
rec['PV_ramdisk'] = ramdisk
else:
# 2. Use kernel within the image
- rec['PV_args'] = 'clocksource=jiffies'
rec['PV_bootloader'] = 'pygrub'
else:
# 3. Using hardware virtualization
@@ -357,10 +358,12 @@ class VMHelper(HelperBase):
os_type = instance.os_type or FLAGS.default_os_type
+ glance_host, glance_port = \
+ glance_image_service.pick_glance_api_server()
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
- 'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port,
+ 'glance_host': glance_host,
+ 'glance_port': glance_port,
'sr_path': cls.get_sr_path(session),
'os_type': os_type}
@@ -408,9 +411,11 @@ class VMHelper(HelperBase):
# here (under Python 2.6+) and pass them as arguments
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
+ glance_host, glance_port = \
+ glance_image_service.pick_glance_api_server()
params = {'image_id': image,
- 'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port,
+ 'glance_host': glance_host,
+ 'glance_port': glance_port,
'uuid_stack': uuid_stack,
'sr_path': cls.get_sr_path(session)}
@@ -455,8 +460,8 @@ class VMHelper(HelperBase):
# DISK restores
sr_ref = safe_find_sr(session)
- client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
- meta, image_file = client.get_image(image)
+ glance_client, image_id = nova.image.get_glance_client(image)
+ meta, image_file = glance_client.get_image(image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
@@ -515,10 +520,10 @@ class VMHelper(HelperBase):
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
disk_format = pretty_format[image_type]
- image_id = instance.image_id
+ image_ref = instance.image_ref
instance_id = instance.id
LOG.debug(_("Detected %(disk_format)s format for image "
- "%(image_id)s, instance %(instance_id)s") % locals())
+ "%(image_ref)s, instance %(instance_id)s") % locals())
def determine_from_glance():
glance_disk_format2nova_type = {
@@ -527,8 +532,9 @@ class VMHelper(HelperBase):
'ari': ImageType.KERNEL_RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD}
- client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
- meta = client.get_image_meta(instance.image_id)
+ image_ref = instance.image_ref
+ glance_client, image_id = nova.image.get_glance_client(image_ref)
+ meta = glance_client.get_image_meta(image_id)
disk_format = meta['disk_format']
try:
return glance_disk_format2nova_type[disk_format]
@@ -574,7 +580,8 @@ class VMHelper(HelperBase):
Returns: A single filename if image_type is KERNEL_RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
- url = images.image_url(image)
+ url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
+ image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
if image_type == ImageType.KERNEL_RAMDISK:
fn = 'get_kernel'
@@ -1044,6 +1051,8 @@ def _stream_disk(dev, image_type, virtual_size, image_file):
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
+ utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev)
+
with open('/dev/%s' % dev, 'wb') as f:
f.seek(offset)
for chunk in image_file:
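The glance upload and fetch paths in vm_utils.py above now ask glance_image_service.pick_glance_api_server() for a (host, port) pair instead of reading a single FLAGS.glance_host/glance_port. Presumably this lets a deployment list several Glance API servers and spread requests across them; a guessed, simplified picker:

    import random

    # Hypothetical configuration value: a list of "host:port" endpoints.
    glance_api_servers = ['glance1.example.com:9292', 'glance2.example.com:9292']

    def pick_glance_api_server():
        """Return (host, port) for one randomly chosen Glance endpoint."""
        host, port_str = random.choice(glance_api_servers).split(':')
        return host, int(port_str)

    host, port = pick_glance_api_server()
    assert port == 9292 and host.startswith('glance')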
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2b3fb6a39..d105cf300 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -101,7 +101,7 @@ class VMOps(object):
if not vm_ref:
vm_ref = VMHelper.lookup(self._session, instance.name)
if vm_ref is None:
- raise exception(_('Attempted to power on non-existent instance'
+ raise Exception(_('Attempted to power on non-existent instance'
' bad instance id %s') % instance.id)
LOG.debug(_("Starting instance %s"), instance.name)
self._session.call_xenapi('VM.start', vm_ref, False, False)
@@ -111,7 +111,7 @@ class VMOps(object):
project = AuthManager().get_project(instance.project_id)
disk_image_type = VMHelper.determine_disk_image_type(instance)
vdis = VMHelper.fetch_image(self._session,
- instance.id, instance.image_id, user, project,
+ instance.id, instance.image_ref, user, project,
disk_image_type)
return vdis
@@ -160,9 +160,24 @@ class VMOps(object):
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdis[0]['vdi_uuid'])
- use_pv_kernel = VMHelper.determine_is_pv(self._session,
- instance.id, first_vdi_ref, disk_image_type,
- instance.os_type)
+
+ vm_mode = instance.vm_mode and instance.vm_mode.lower()
+ if vm_mode == 'pv':
+ use_pv_kernel = True
+ elif vm_mode in ('hv', 'hvm'):
+ use_pv_kernel = False
+ vm_mode = 'hvm' # Normalize
+ else:
+ use_pv_kernel = VMHelper.determine_is_pv(self._session,
+ instance.id, first_vdi_ref, disk_image_type,
+ instance.os_type)
+ vm_mode = use_pv_kernel and 'pv' or 'hvm'
+
+ if instance.vm_mode != vm_mode:
+ # Update database with normalized (or determined) value
+ db.instance_update(context.get_admin_context(),
+ instance['id'], {'vm_mode': vm_mode})
+
vm_ref = VMHelper.create_vm(self._session, instance,
kernel, ramdisk, use_pv_kernel)
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
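The spawn path above now honours an explicit vm_mode ('pv', or 'hv'/'hvm') before falling back to auto-detection, and writes the normalized value back to the instance record. The decision reduces to a small function; a detached sketch, with a stand-in for VMHelper.determine_is_pv:

    def resolve_vm_mode(vm_mode, autodetect_pv):
        """Return (use_pv_kernel, normalized_mode) for a requested vm_mode."""
        mode = vm_mode and vm_mode.lower()
        if mode == 'pv':
            return True, 'pv'
        if mode in ('hv', 'hvm'):
            return False, 'hvm'
        use_pv = autodetect_pv()          # fall back to image inspection
        return use_pv, 'pv' if use_pv else 'hvm'

    assert resolve_vm_mode('HVM', lambda: True) == (False, 'hvm')
    assert resolve_vm_mode(None, lambda: True) == (True, 'pv')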
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 5804955f7..b07f2e94b 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -20,14 +20,13 @@
Handles all requests relating to volumes.
"""
-import datetime
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova import rpc
+from nova import utils
from nova.db import base
FLAGS = flags.FLAGS
@@ -78,7 +77,7 @@ class API(base.Base):
volume = self.get(context, volume_id)
if volume['status'] != "available":
raise exception.ApiError(_("Volume status must be available"))
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
self.db.volume_update(context, volume_id, {'status': 'deleting',
'terminated_at': now})
host = volume['host']
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index ff53f0701..798bd379a 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -42,8 +42,6 @@ intact.
"""
-import datetime
-
from nova import context
from nova import exception
@@ -127,7 +125,7 @@ class VolumeManager(manager.SchedulerDependentManager):
volume_ref['id'], {'status': 'error'})
raise
- now = datetime.datetime.utcnow()
+ now = utils.utcnow()
self.db.volume_update(context,
volume_ref['id'], {'status': 'available',
'launched_at': now})
diff --git a/nova/wsgi.py b/nova/wsgi.py
index ea9bb963d..33ba852bc 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -85,36 +85,7 @@ class Server(object):
class Request(webob.Request):
-
- def best_match_content_type(self):
- """Determine the most acceptable content-type.
-
- Based on the query extension then the Accept header.
-
- """
- parts = self.path.rsplit('.', 1)
-
- if len(parts) > 1:
- format = parts[1]
- if format in ['json', 'xml']:
- return 'application/{0}'.format(parts[1])
-
- ctypes = ['application/json', 'application/xml']
- bm = self.accept.best_match(ctypes)
-
- return bm or 'application/json'
-
- def get_content_type(self):
- allowed_types = ("application/xml", "application/json")
- if not "Content-Type" in self.headers:
- msg = _("Missing Content-Type")
- LOG.debug(msg)
- raise webob.exc.HTTPBadRequest(msg)
- type = self.content_type
- if type in allowed_types:
- return type
- LOG.debug(_("Wrong Content-Type: %s") % type)
- raise webob.exc.HTTPBadRequest("Invalid content type")
+ pass
class Application(object):
@@ -289,8 +260,8 @@ class Router(object):
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
- well and have your controller be a wsgi.Controller, who will route
- the request to the action method.
+ well and have your controller be an object that can route
+ the request to the action-specific method.
Examples:
mapper = routes.Mapper()
@@ -338,223 +309,6 @@ class Router(object):
return app
-class Controller(object):
- """WSGI app that dispatched to methods.
-
- WSGI app that reads routing information supplied by RoutesMiddleware
- and calls the requested action method upon itself. All action methods
- must, in addition to their normal parameters, accept a 'req' argument
- which is the incoming wsgi.Request. They raise a webob.exc exception,
- or return a dict which will be serialized by requested content type.
-
- """
-
- @webob.dec.wsgify(RequestClass=Request)
- def __call__(self, req):
- """Call the method specified in req.environ by RoutesMiddleware."""
- arg_dict = req.environ['wsgiorg.routing_args'][1]
- action = arg_dict['action']
- method = getattr(self, action)
- LOG.debug("%s %s" % (req.method, req.url))
- del arg_dict['controller']
- del arg_dict['action']
- if 'format' in arg_dict:
- del arg_dict['format']
- arg_dict['req'] = req
- result = method(**arg_dict)
-
- if type(result) is dict:
- content_type = req.best_match_content_type()
- default_xmlns = self.get_default_xmlns(req)
- body = self._serialize(result, content_type, default_xmlns)
-
- response = webob.Response()
- response.headers['Content-Type'] = content_type
- response.body = body
- msg_dict = dict(url=req.url, status=response.status_int)
- msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
- LOG.debug(msg)
- return response
- else:
- return result
-
- def _serialize(self, data, content_type, default_xmlns):
- """Serialize the given dict to the provided content_type.
-
- Uses self._serialization_metadata if it exists, which is a dict mapping
- MIME types to information needed to serialize to that type.
-
- """
- _metadata = getattr(type(self), '_serialization_metadata', {})
-
- serializer = Serializer(_metadata, default_xmlns)
- try:
- return serializer.serialize(data, content_type)
- except exception.InvalidContentType:
- raise webob.exc.HTTPNotAcceptable()
-
- def _deserialize(self, data, content_type):
- """Deserialize the request body to the specefied content type.
-
- Uses self._serialization_metadata if it exists, which is a dict mapping
- MIME types to information needed to serialize to that type.
-
- """
- _metadata = getattr(type(self), '_serialization_metadata', {})
- serializer = Serializer(_metadata)
- return serializer.deserialize(data, content_type)
-
- def get_default_xmlns(self, req):
- """Provide the XML namespace to use if none is otherwise specified."""
- return None
-
-
-class Serializer(object):
- """Serializes and deserializes dictionaries to certain MIME types."""
-
- def __init__(self, metadata=None, default_xmlns=None):
- """Create a serializer based on the given WSGI environment.
-
- 'metadata' is an optional dict mapping MIME types to information
- needed to serialize a dictionary to that type.
-
- """
- self.metadata = metadata or {}
- self.default_xmlns = default_xmlns
-
- def _get_serialize_handler(self, content_type):
- handlers = {
- 'application/json': self._to_json,
- 'application/xml': self._to_xml,
- }
-
- try:
- return handlers[content_type]
- except Exception:
- raise exception.InvalidContentType(content_type=content_type)
-
- def serialize(self, data, content_type):
- """Serialize a dictionary into the specified content type."""
- return self._get_serialize_handler(content_type)(data)
-
- def deserialize(self, datastring, content_type):
- """Deserialize a string to a dictionary.
-
- The string must be in the format of a supported MIME type.
-
- """
- return self.get_deserialize_handler(content_type)(datastring)
-
- def get_deserialize_handler(self, content_type):
- handlers = {
- 'application/json': self._from_json,
- 'application/xml': self._from_xml,
- }
-
- try:
- return handlers[content_type]
- except Exception:
- raise exception.InvalidContentType(content_type=content_type)
-
- def _from_json(self, datastring):
- return utils.loads(datastring)
-
- def _from_xml(self, datastring):
- xmldata = self.metadata.get('application/xml', {})
- plurals = set(xmldata.get('plurals', {}))
- node = minidom.parseString(datastring).childNodes[0]
- return {node.nodeName: self._from_xml_node(node, plurals)}
-
- def _from_xml_node(self, node, listnames):
- """Convert a minidom node to a simple Python type.
-
- listnames is a collection of names of XML nodes whose subnodes should
- be considered list items.
-
- """
- if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
- return node.childNodes[0].nodeValue
- elif node.nodeName in listnames:
- return [self._from_xml_node(n, listnames) for n in node.childNodes]
- else:
- result = dict()
- for attr in node.attributes.keys():
- result[attr] = node.attributes[attr].nodeValue
- for child in node.childNodes:
- if child.nodeType != node.TEXT_NODE:
- result[child.nodeName] = self._from_xml_node(child,
- listnames)
- return result
-
- def _to_json(self, data):
- return utils.dumps(data)
-
- def _to_xml(self, data):
- metadata = self.metadata.get('application/xml', {})
- # We expect data to contain a single key which is the XML root.
- root_key = data.keys()[0]
- doc = minidom.Document()
- node = self._to_xml_node(doc, metadata, root_key, data[root_key])
-
- xmlns = node.getAttribute('xmlns')
- if not xmlns and self.default_xmlns:
- node.setAttribute('xmlns', self.default_xmlns)
-
- return node.toprettyxml(indent=' ')
-
- def _to_xml_node(self, doc, metadata, nodename, data):
- """Recursive method to convert data members to XML nodes."""
- result = doc.createElement(nodename)
-
- # Set the xml namespace if one is specified
- # TODO(justinsb): We could also use prefixes on the keys
- xmlns = metadata.get('xmlns', None)
- if xmlns:
- result.setAttribute('xmlns', xmlns)
-
- if type(data) is list:
- collections = metadata.get('list_collections', {})
- if nodename in collections:
- metadata = collections[nodename]
- for item in data:
- node = doc.createElement(metadata['item_name'])
- node.setAttribute(metadata['item_key'], str(item))
- result.appendChild(node)
- return result
- singular = metadata.get('plurals', {}).get(nodename, None)
- if singular is None:
- if nodename.endswith('s'):
- singular = nodename[:-1]
- else:
- singular = 'item'
- for item in data:
- node = self._to_xml_node(doc, metadata, singular, item)
- result.appendChild(node)
- elif type(data) is dict:
- collections = metadata.get('dict_collections', {})
- if nodename in collections:
- metadata = collections[nodename]
- for k, v in data.items():
- node = doc.createElement(metadata['item_name'])
- node.setAttribute(metadata['item_key'], str(k))
- text = doc.createTextNode(str(v))
- node.appendChild(text)
- result.appendChild(node)
- return result
- attrs = metadata.get('attributes', {}).get(nodename, {})
- for k, v in data.items():
- if k in attrs:
- result.setAttribute(k, str(v))
- else:
- node = self._to_xml_node(doc, metadata, k, v)
- result.appendChild(node)
- else:
- # Type is atom
- node = doc.createTextNode(str(data))
- result.appendChild(node)
- return result
-
-
def paste_config_file(basename):
"""Find the best location in the system for a paste config file.