-rw-r--r-- | Authors | 2
-rw-r--r-- | nova/api/direct.py | 12
-rw-r--r-- | nova/api/ec2/__init__.py | 6
-rw-r--r-- | nova/api/ec2/cloud.py | 70
-rw-r--r-- | nova/api/ec2/metadatarequesthandler.py | 11
-rw-r--r-- | nova/api/openstack/__init__.py | 43
-rw-r--r-- | nova/api/openstack/accounts.py | 33
-rw-r--r-- | nova/api/openstack/backup_schedules.py | 27
-rw-r--r-- | nova/api/openstack/common.py | 7
-rw-r--r-- | nova/api/openstack/consoles.py | 29
-rw-r--r-- | nova/api/openstack/contrib/volumes.py | 25
-rw-r--r-- | nova/api/openstack/extensions.py | 95
-rw-r--r-- | nova/api/openstack/faults.py | 39
-rw-r--r-- | nova/api/openstack/flavors.py | 34
-rw-r--r-- | nova/api/openstack/image_metadata.py | 20
-rw-r--r-- | nova/api/openstack/images.py | 72
-rw-r--r-- | nova/api/openstack/ips.py | 33
-rw-r--r-- | nova/api/openstack/limits.py | 49
-rw-r--r-- | nova/api/openstack/server_metadata.py | 21
-rw-r--r-- | nova/api/openstack/servers.py | 140
-rw-r--r-- | nova/api/openstack/shared_ip_groups.py | 28
-rw-r--r-- | nova/api/openstack/users.py | 43
-rw-r--r-- | nova/api/openstack/versions.py | 43
-rw-r--r-- | nova/api/openstack/wsgi.py | 380
-rw-r--r-- | nova/api/openstack/zones.py | 33
-rw-r--r-- | nova/auth/novarc.template | 2
-rw-r--r-- | nova/compute/api.py | 33
-rw-r--r-- | nova/compute/manager.py | 5
-rw-r--r-- | nova/db/api.py | 39
-rw-r--r-- | nova/db/sqlalchemy/api.py | 115
-rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py | 2
-rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py | 29
-rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py | 70
-rw-r--r-- | nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py | 47
-rw-r--r-- | nova/db/sqlalchemy/models.py | 27
-rw-r--r-- | nova/exception.py | 13
-rw-r--r-- | nova/flags.py | 1
-rw-r--r-- | nova/image/fake.py | 4
-rw-r--r-- | nova/image/glance.py | 8
-rw-r--r-- | nova/image/local.py | 4
-rw-r--r-- | nova/image/s3.py | 94
-rw-r--r-- | nova/image/service.py | 4
-rw-r--r-- | nova/objectstore/s3server.py | 2
-rw-r--r-- | nova/rpc.py | 1
-rw-r--r-- | nova/scheduler/host_filter.py | 101
-rw-r--r-- | nova/scheduler/manager.py | 7
-rw-r--r-- | nova/scheduler/zone_aware_scheduler.py | 91
-rw-r--r-- | nova/tests/api/openstack/extensions/foxinsocks.py | 4
-rw-r--r-- | nova/tests/api/openstack/fakes.py | 4
-rw-r--r-- | nova/tests/api/openstack/test_extensions.py | 4
-rw-r--r-- | nova/tests/api/openstack/test_images.py | 141
-rw-r--r-- | nova/tests/api/openstack/test_limits.py | 4
-rw-r--r-- | nova/tests/api/openstack/test_servers.py | 56
-rw-r--r-- | nova/tests/api/openstack/test_wsgi.py | 293
-rw-r--r-- | nova/tests/api/test_wsgi.py | 189
-rw-r--r-- | nova/tests/image/test_glance.py | 2
-rw-r--r-- | nova/tests/integrated/api/client.py | 10
-rw-r--r-- | nova/tests/integrated/test_servers.py | 106
-rw-r--r-- | nova/tests/integrated/test_xml.py | 4
-rw-r--r-- | nova/tests/test_cloud.py | 65
-rw-r--r-- | nova/tests/test_host_filter.py | 115
-rw-r--r-- | nova/tests/test_libvirt.py | 3
-rw-r--r-- | nova/tests/test_quota.py | 2
-rw-r--r-- | nova/tests/test_volume.py | 50
-rw-r--r-- | nova/tests/test_zone_aware_scheduler.py | 24
-rw-r--r-- | nova/virt/fake.py | 23
-rw-r--r-- | nova/virt/libvirt.xml.template | 2
-rw-r--r-- | nova/virt/libvirt/connection.py | 17
-rw-r--r-- | nova/virt/vmwareapi/vmops.py | 6
-rw-r--r-- | nova/vnc/__init__.py | 2
-rw-r--r-- | nova/volume/api.py | 57
-rw-r--r-- | nova/volume/driver.py | 134
-rw-r--r-- | nova/volume/manager.py | 59
-rw-r--r-- | nova/wsgi.py | 252
-rwxr-xr-x | plugins/xenserver/networking/etc/init.d/openvswitch-nova | 96
-rw-r--r-- | plugins/xenserver/networking/etc/sysconfig/openvswitch-nova | 1
-rw-r--r-- | plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules | 3
-rw-r--r-- | plugins/xenserver/networking/etc/xensource/scripts/novalib.py | 40
-rwxr-xr-x | plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py | 62
-rwxr-xr-x | plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py | 180
-rwxr-xr-x | plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py | 30
-rw-r--r-- | tools/install_venv.py | 2
82 files changed, 2911 insertions, 1125 deletions
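
The recurring change across the OpenStack API files below is that controllers no longer subclass wsgi.Controller or carry _serialization_metadata; each module instead exposes a create_resource() factory that wraps a plain controller in nova.api.openstack.wsgi.Resource, which parses the request body, hands it to the action as a `body` argument, and serializes the result per content type. A minimal, self-contained sketch of that dispatch idea (ToyResource and EchoController are illustrative stand-ins, not the real nova classes):

    import json

    class JSONDictSerializer(object):
        """Stand-in for the JSON serializer; the real one lives in nova.api.openstack.wsgi."""
        def serialize(self, data):
            return json.dumps(data)

    class ToyResource(object):
        """Wraps a plain controller and owns request/response (de)serialization."""
        def __init__(self, controller, serializers=None):
            self.controller = controller
            self.serializers = serializers or {'application/json': JSONDictSerializer()}

        def dispatch(self, action, content_type, raw_body=None, **kwargs):
            body = json.loads(raw_body) if raw_body else None
            handler = getattr(self.controller, action)
            if body is not None:
                kwargs['body'] = body  # actions taking a request body receive a parsed dict
            result = handler(**kwargs)
            return self.serializers[content_type].serialize(result)

    class EchoController(object):
        """A controller is now just a plain object whose actions take a parsed body."""
        def create(self, body):
            return {'echo': body}

    resource = ToyResource(EchoController())
    print(resource.dispatch('create', 'application/json', '{"name": "test"}'))
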
diff --git a/Authors b/Authors
index 50f4680a9..8b30fba77 100644
--- a/Authors
+++ b/Authors
@@ -30,6 +30,7 @@ Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
Ilya Alekseyev <ialekseev@griddynamics.com>
+Isaku Yamahata <yamahata@valinux.co.jp>
Jason Koelker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
@@ -83,6 +84,7 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
+Vivek Y S <vivek.ys@gmail.com>
William Wolf <throughnothing@gmail.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
diff --git a/nova/api/direct.py b/nova/api/direct.py
index 8ceae299c..ea20042a7 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -42,6 +42,7 @@ from nova import exception
from nova import flags
from nova import utils
from nova import wsgi
+import nova.api.openstack.wsgi
# Global storage for registering modules.
@@ -251,7 +252,7 @@ class Reflection(object):
return self._methods[method]
-class ServiceWrapper(wsgi.Controller):
+class ServiceWrapper(object):
"""Wrapper to dynamically povide a WSGI controller for arbitrary objects.
With lightweight introspection allows public methods on the object to
@@ -265,7 +266,7 @@ class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
- @webob.dec.wsgify(RequestClass=wsgi.Request)
+ @webob.dec.wsgify(RequestClass=nova.api.openstack.wsgi.Request)
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
@@ -289,8 +290,11 @@ class ServiceWrapper(wsgi.Controller):
try:
content_type = req.best_match_content_type()
- default_xmlns = self.get_default_xmlns(req)
- return self._serialize(result, content_type, default_xmlns)
+ serializer = {
+ 'application/xml': nova.api.openstack.wsgi.XMLDictSerializer(),
+ 'application/json': nova.api.openstack.wsgi.JSONDictSerializer(),
+ }[content_type]
+ return serializer.serialize(result)
except:
raise exception.Error("returned non-serializable type: %s"
% result)
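
With ServiceWrapper no longer inheriting serialization helpers, the serializer is picked from a dict keyed by the negotiated content type. A rough standalone sketch of that selection step, using stdlib json and ElementTree in place of nova's JSONDictSerializer/XMLDictSerializer:

    import json
    import xml.etree.ElementTree as ET

    def to_xml(data):
        # Assumes a single-key dict like {'account': {...}}, the usual API response shape.
        root_name, fields = list(data.items())[0]
        root = ET.Element(root_name)
        for key, value in fields.items():
            root.set(key, str(value))
        return ET.tostring(root, encoding='unicode')

    serializers = {
        'application/json': json.dumps,
        'application/xml': to_xml,
    }

    result = {'account': {'id': 'acct1', 'manager': 'joe'}}
    for content_type in ('application/json', 'application/xml'):
        print(serializers[content_type](result))
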
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index c13993dd3..1915d007d 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -327,6 +327,12 @@ class Executor(wsgi.Application):
ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x')
message = _('Volume %s not found') % ec2_id
return self._error(req, context, type(ex).__name__, message)
+ except exception.SnapshotNotFound as ex:
+ LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex),
+ context=context)
+ ec2_id = ec2utils.id_to_ec2_id(ex.snapshot_id, 'snap-%08x')
+ message = _('Snapshot %s not found') % ec2_id
+ return self._error(req, context, type(ex).__name__, message)
except exception.NotFound as ex:
LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
return self._error(req, context, type(ex).__name__, unicode(ex))
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index c35b6024e..79cc3b3bf 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -283,14 +283,50 @@ class CloudController(object):
owner=None,
restorable_by=None,
**kwargs):
- return {'snapshotSet': [{'snapshotId': 'fixme',
- 'volumeId': 'fixme',
- 'status': 'fixme',
- 'startTime': 'fixme',
- 'progress': 'fixme',
- 'ownerId': 'fixme',
- 'volumeSize': 0,
- 'description': 'fixme'}]}
+ if snapshot_id:
+ snapshots = []
+ for ec2_id in snapshot_id:
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
+ snapshot = self.volume_api.get_snapshot(
+ context,
+ snapshot_id=internal_id)
+ snapshots.append(snapshot)
+ else:
+ snapshots = self.volume_api.get_all_snapshots(context)
+ snapshots = [self._format_snapshot(context, s) for s in snapshots]
+ return {'snapshotSet': snapshots}
+
+ def _format_snapshot(self, context, snapshot):
+ s = {}
+ s['snapshotId'] = ec2utils.id_to_ec2_id(snapshot['id'], 'snap-%08x')
+ s['volumeId'] = ec2utils.id_to_ec2_id(snapshot['volume_id'],
+ 'vol-%08x')
+ s['status'] = snapshot['status']
+ s['startTime'] = snapshot['created_at']
+ s['progress'] = snapshot['progress']
+ s['ownerId'] = snapshot['project_id']
+ s['volumeSize'] = snapshot['volume_size']
+ s['description'] = snapshot['display_description']
+
+ s['display_name'] = snapshot['display_name']
+ s['display_description'] = snapshot['display_description']
+ return s
+
+ def create_snapshot(self, context, volume_id, **kwargs):
+ LOG.audit(_("Create snapshot of volume %s"), volume_id,
+ context=context)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
+ snapshot = self.volume_api.create_snapshot(
+ context,
+ volume_id=volume_id,
+ name=kwargs.get('display_name'),
+ description=kwargs.get('display_description'))
+ return self._format_snapshot(context, snapshot)
+
+ def delete_snapshot(self, context, snapshot_id, **kwargs):
+ snapshot_id = ec2utils.ec2_id_to_id(snapshot_id)
+ self.volume_api.delete_snapshot(context, snapshot_id=snapshot_id)
+ return True
def describe_key_pairs(self, context, key_name=None, **kwargs):
key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
@@ -619,16 +655,30 @@ class CloudController(object):
'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
+ if volume.get('snapshot_id') != None:
+ v['snapshotId'] = ec2utils.id_to_ec2_id(volume['snapshot_id'],
+ 'snap-%08x')
+ else:
+ v['snapshotId'] = None
v['display_name'] = volume['display_name']
v['display_description'] = volume['display_description']
return v
- def create_volume(self, context, size, **kwargs):
- LOG.audit(_("Create volume of %s GB"), size, context=context)
+ def create_volume(self, context, **kwargs):
+ size = kwargs.get('size')
+ if kwargs.get('snapshot_id') != None:
+ snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id'])
+ LOG.audit(_("Create volume from snapshot %s"), snapshot_id,
+ context=context)
+ else:
+ snapshot_id = None
+ LOG.audit(_("Create volume of %s GB"), size, context=context)
+
volume = self.volume_api.create(
context,
size=size,
+ snapshot_id=snapshot_id,
name=kwargs.get('display_name'),
description=kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
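
The snapshot calls above reuse the EC2 id convention already used for volumes: the integer database id is rendered with a prefixed, zero-padded hex template ('snap-%08x') and parsed back by stripping the prefix. Simplified versions of those two conversions (not the real ec2utils helpers):

    def id_to_ec2_id(internal_id, template='i-%08x'):
        # e.g. id_to_ec2_id(7, 'snap-%08x') -> 'snap-00000007'
        return template % int(internal_id)

    def ec2_id_to_id(ec2_id):
        # e.g. ec2_id_to_id('snap-00000007') -> 7
        return int(ec2_id.split('-')[-1], 16)

    assert id_to_ec2_id(7, 'snap-%08x') == 'snap-00000007'
    assert ec2_id_to_id('snap-00000007') == 7
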
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index 28f99b0ef..b70266a20 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -23,6 +23,7 @@ import webob.exc
from nova import log as logging
from nova import flags
+from nova import utils
from nova import wsgi
from nova.api.ec2 import cloud
@@ -71,7 +72,15 @@ class MetadataRequestHandler(wsgi.Application):
remote_address = req.remote_addr
if FLAGS.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
- meta_data = cc.get_metadata(remote_address)
+ try:
+ meta_data = cc.get_metadata(remote_address)
+ except Exception:
+ LOG.exception(_('Failed to get metadata for ip: %s'),
+ remote_address)
+ msg = _('An unknown error has occurred. '
+ 'Please try your request again.')
+ exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg))
+ return exc
if meta_data is None:
LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
raise webob.exc.HTTPNotFound()
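
The metadata handler now separates an unexpected failure, which is logged with a traceback and returned as a generic 500, from the existing no-metadata case, which stays a 404. A hedged sketch of that control flow, assuming an injected get_metadata callable and using webob the same way the diff does:

    import logging
    import webob.exc

    LOG = logging.getLogger(__name__)

    def lookup_metadata(get_metadata, remote_address):
        try:
            meta_data = get_metadata(remote_address)
        except Exception:
            # Unexpected failure: log the traceback, hide details from the caller.
            LOG.exception('Failed to get metadata for ip: %s', remote_address)
            msg = ('An unknown error has occurred. '
                   'Please try your request again.')
            return webob.exc.HTTPInternalServerError(explanation=msg)
        if meta_data is None:
            LOG.error('Failed to get metadata for ip: %s', remote_address)
            raise webob.exc.HTTPNotFound()
        return meta_data
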
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 5b7f080ad..d8fb5265b 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -26,7 +26,7 @@ import webob.exc
from nova import flags
from nova import log as logging
-from nova import wsgi
+from nova import wsgi as base_wsgi
from nova.api.openstack import accounts
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
@@ -40,6 +40,7 @@ from nova.api.openstack import servers
from nova.api.openstack import server_metadata
from nova.api.openstack import shared_ip_groups
from nova.api.openstack import users
+from nova.api.openstack import wsgi
from nova.api.openstack import zones
@@ -50,7 +51,7 @@ flags.DEFINE_bool('allow_admin_api',
'When True, this API service will accept admin operations.')
-class FaultWrapper(wsgi.Middleware):
+class FaultWrapper(base_wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
@@ -63,7 +64,7 @@ class FaultWrapper(wsgi.Middleware):
return faults.Fault(exc)
-class APIRouter(wsgi.Router):
+class APIRouter(base_wsgi.Router):
"""
Routes requests on the OpenStack API to the appropriate controller
and method.
@@ -97,19 +98,21 @@ class APIRouter(wsgi.Router):
server_members['reset_network'] = 'POST'
server_members['inject_network_info'] = 'POST'
- mapper.resource("zone", "zones", controller=zones.Controller(),
+ mapper.resource("zone", "zones",
+ controller=zones.create_resource(),
collection={'detail': 'GET', 'info': 'GET',
'select': 'GET'})
- mapper.resource("user", "users", controller=users.Controller(),
+ mapper.resource("user", "users",
+ controller=users.create_resource(),
collection={'detail': 'GET'})
mapper.resource("account", "accounts",
- controller=accounts.Controller(),
+ controller=accounts.create_resource(),
collection={'detail': 'GET'})
mapper.resource("console", "consoles",
- controller=consoles.Controller(),
+ controller=consoles.create_resource(),
parent_resource=dict(member_name='server',
collection_name='servers'))
@@ -122,31 +125,31 @@ class APIRouterV10(APIRouter):
def _setup_routes(self, mapper):
super(APIRouterV10, self)._setup_routes(mapper)
mapper.resource("server", "servers",
- controller=servers.ControllerV10(),
+ controller=servers.create_resource('1.0'),
collection={'detail': 'GET'},
member=self.server_members)
mapper.resource("image", "images",
- controller=images.ControllerV10(),
+ controller=images.create_resource('1.0'),
collection={'detail': 'GET'})
mapper.resource("flavor", "flavors",
- controller=flavors.ControllerV10(),
+ controller=flavors.create_resource('1.0'),
collection={'detail': 'GET'})
mapper.resource("shared_ip_group", "shared_ip_groups",
collection={'detail': 'GET'},
- controller=shared_ip_groups.Controller())
+ controller=shared_ip_groups.create_resource())
mapper.resource("backup_schedule", "backup_schedule",
- controller=backup_schedules.Controller(),
+ controller=backup_schedules.create_resource(),
parent_resource=dict(member_name='server',
collection_name='servers'))
mapper.resource("limit", "limits",
- controller=limits.LimitsControllerV10())
+ controller=limits.create_resource('1.0'))
- mapper.resource("ip", "ips", controller=ips.Controller(),
+ mapper.resource("ip", "ips", controller=ips.create_resource(),
collection=dict(public='GET', private='GET'),
parent_resource=dict(member_name='server',
collection_name='servers'))
@@ -158,27 +161,27 @@ class APIRouterV11(APIRouter):
def _setup_routes(self, mapper):
super(APIRouterV11, self)._setup_routes(mapper)
mapper.resource("server", "servers",
- controller=servers.ControllerV11(),
+ controller=servers.create_resource('1.1'),
collection={'detail': 'GET'},
member=self.server_members)
mapper.resource("image", "images",
- controller=images.ControllerV11(),
+ controller=images.create_resource('1.1'),
collection={'detail': 'GET'})
mapper.resource("image_meta", "meta",
- controller=image_metadata.Controller(),
+ controller=image_metadata.create_resource(),
parent_resource=dict(member_name='image',
collection_name='images'))
mapper.resource("server_meta", "meta",
- controller=server_metadata.Controller(),
+ controller=server_metadata.create_resource(),
parent_resource=dict(member_name='server',
collection_name='servers'))
mapper.resource("flavor", "flavors",
- controller=flavors.ControllerV11(),
+ controller=flavors.create_resource('1.1'),
collection={'detail': 'GET'})
mapper.resource("limit", "limits",
- controller=limits.LimitsControllerV11())
+ controller=limits.create_resource('1.1'))
diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py
index 00fdd4540..0dcd37217 100644
--- a/nova/api/openstack/accounts.py
+++ b/nova/api/openstack/accounts.py
@@ -20,8 +20,9 @@ from nova import flags
from nova import log as logging
from nova.auth import manager
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')
@@ -34,12 +35,7 @@ def _translate_keys(account):
manager=account.project_manager_id)
-class Controller(common.OpenstackController):
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "account": ["id", "name", "description", "manager"]}}}
+class Controller(object):
def __init__(self):
self.manager = manager.AuthManager()
@@ -66,20 +62,33 @@ class Controller(common.OpenstackController):
self.manager.delete_project(id)
return {}
- def create(self, req):
+ def create(self, req, body):
"""We use update with create-or-update semantics
because the id comes from an external source"""
raise faults.Fault(webob.exc.HTTPNotImplemented())
- def update(self, req, id):
+ def update(self, req, id, body):
"""This is really create or update."""
self._check_admin(req.environ['nova.context'])
- env = self._deserialize(req.body, req.get_content_type())
- description = env['account'].get('description')
- manager = env['account'].get('manager')
+ description = body['account'].get('description')
+ manager = body['account'].get('manager')
try:
account = self.manager.get_project(id)
self.manager.modify_project(id, manager, description)
except exception.NotFound:
account = self.manager.create_project(id, manager, description)
return dict(account=_translate_keys(account))
+
+
+def create_resource():
+ metadata = {
+ "attributes": {
+ "account": ["id", "name", "description", "manager"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index 4bf744046..71a14d4ce 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -19,9 +19,8 @@ import time
from webob import exc
-from nova.api.openstack import common
from nova.api.openstack import faults
-import nova.image.service
+from nova.api.openstack import wsgi
def _translate_keys(inst):
@@ -29,14 +28,9 @@ def _translate_keys(inst):
return dict(backupSchedule=inst)
-class Controller(common.OpenstackController):
+class Controller(object):
""" The backup schedule API controller for the Openstack API """
- _serialization_metadata = {
- 'application/xml': {
- 'attributes': {
- 'backupSchedule': []}}}
-
def __init__(self):
pass
@@ -48,7 +42,7 @@ class Controller(common.OpenstackController):
""" Returns a single backup schedule for a given instance """
return faults.Fault(exc.HTTPNotImplemented())
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
""" No actual update method required, since the existing API allows
both create and update through a POST """
return faults.Fault(exc.HTTPNotImplemented())
@@ -56,3 +50,18 @@ class Controller(common.OpenstackController):
def delete(self, req, server_id, id):
""" Deletes an existing backup schedule """
return faults.Fault(exc.HTTPNotImplemented())
+
+
+def create_resource():
+ metadata = {
+ 'attributes': {
+ 'backupSchedule': [],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
+ metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 32cd689ca..bb1a96812 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -23,7 +23,6 @@ import webob
from nova import exception
from nova import flags
from nova import log as logging
-from nova import wsgi
LOG = logging.getLogger('nova.api.openstack.common')
@@ -146,9 +145,3 @@ def get_id_from_href(href):
except:
LOG.debug(_("Error extracting id from href: %s") % href)
raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
-
-
-class OpenstackController(wsgi.Controller):
- def get_default_xmlns(self, req):
- # Use V10 by default
- return XML_NS_V10
diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py
index 1a77f25d7..bccf04d8f 100644
--- a/nova/api/openstack/consoles.py
+++ b/nova/api/openstack/consoles.py
@@ -19,8 +19,8 @@ from webob import exc
from nova import console
from nova import exception
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
def _translate_keys(cons):
@@ -43,17 +43,11 @@ def _translate_detail_keys(cons):
return dict(console=info)
-class Controller(common.OpenstackController):
- """The Consoles Controller for the Openstack API"""
-
- _serialization_metadata = {
- 'application/xml': {
- 'attributes': {
- 'console': []}}}
+class Controller(object):
+ """The Consoles controller for the Openstack API"""
def __init__(self):
self.console_api = console.API()
- super(Controller, self).__init__()
def index(self, req, server_id):
"""Returns a list of consoles for this instance"""
@@ -63,9 +57,8 @@ class Controller(common.OpenstackController):
return dict(consoles=[_translate_keys(console)
for console in consoles])
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
"""Creates a new console"""
- #info = self._deserialize(req.body, req.get_content_type())
self.console_api.create_console(
req.environ['nova.context'],
int(server_id))
@@ -94,3 +87,17 @@ class Controller(common.OpenstackController):
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
+
+
+def create_resource():
+ metadata = {
+ 'attributes': {
+ 'console': [],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index 18de2ec71..feabdce89 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -22,7 +22,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import volume
-from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import faults
@@ -64,7 +63,7 @@ def _translate_volume_summary_view(context, vol):
return d
-class VolumeController(wsgi.Controller):
+class VolumeController(object):
"""The Volumes API controller for the OpenStack API."""
_serialization_metadata = {
@@ -124,18 +123,17 @@ class VolumeController(wsgi.Controller):
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
- def create(self, req):
+ def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
- if not env:
+ if not body:
return faults.Fault(exc.HTTPUnprocessableEntity())
- vol = env['volume']
+ vol = body['volume']
size = vol['size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
- new_volume = self.volume_api.create(context, size,
+ new_volume = self.volume_api.create(context, size, None,
vol.get('display_name'),
vol.get('display_description'))
@@ -175,7 +173,7 @@ def _translate_attachment_summary_view(_context, vol):
return d
-class VolumeAttachmentController(wsgi.Controller):
+class VolumeAttachmentController(object):
"""The volume attachment API controller for the Openstack API.
A child resource of the server. Note that we use the volume id
@@ -219,17 +217,16 @@ class VolumeAttachmentController(wsgi.Controller):
return {'volumeAttachment': _translate_attachment_detail_view(context,
vol)}
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
- if not env:
+ if not body:
return faults.Fault(exc.HTTPUnprocessableEntity())
instance_id = server_id
- volume_id = env['volumeAttachment']['volumeId']
- device = env['volumeAttachment']['device']
+ volume_id = body['volumeAttachment']['volumeId']
+ device = body['volumeAttachment']['device']
msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
" at %(device)s") % locals()
@@ -259,7 +256,7 @@ class VolumeAttachmentController(wsgi.Controller):
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
- def update(self, _req, _server_id, _id):
+ def update(self, req, server_id, id, body):
"""Update a volume attachment. We don't currently support this."""
return faults.Fault(exc.HTTPBadRequest())
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 8e77b25fb..881b61733 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -27,9 +27,10 @@ import webob.exc
from nova import exception
from nova import flags
from nova import log as logging
-from nova import wsgi
+from nova import wsgi as base_wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
LOG = logging.getLogger('extensions')
@@ -115,28 +116,34 @@ class ExtensionDescriptor(object):
return request_exts
-class ActionExtensionController(common.OpenstackController):
-
+class ActionExtensionController(object):
def __init__(self, application):
-
self.application = application
self.action_handlers = {}
def add_action(self, action_name, handler):
self.action_handlers[action_name] = handler
- def action(self, req, id):
-
- input_dict = self._deserialize(req.body, req.get_content_type())
+ def action(self, req, id, body):
for action_name, handler in self.action_handlers.iteritems():
- if action_name in input_dict:
- return handler(input_dict, req, id)
+ if action_name in body:
+ return handler(body, req, id)
# no action handler found (bump to downstream application)
res = self.application
return res
-class RequestExtensionController(common.OpenstackController):
+class ActionExtensionResource(wsgi.Resource):
+
+ def __init__(self, application):
+ controller = ActionExtensionController(application)
+ super(ActionExtensionResource, self).__init__(controller)
+
+ def add_action(self, action_name, handler):
+ self.controller.add_action(action_name, handler)
+
+
+class RequestExtensionController(object):
def __init__(self, application):
self.application = application
@@ -153,7 +160,17 @@ class RequestExtensionController(common.OpenstackController):
return res
-class ExtensionController(common.OpenstackController):
+class RequestExtensionResource(wsgi.Resource):
+
+ def __init__(self, application):
+ controller = RequestExtensionController(application)
+ super(RequestExtensionResource, self).__init__(controller)
+
+ def add_handler(self, handler):
+ self.controller.add_handler(handler)
+
+
+class ExtensionsResource(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
@@ -186,7 +203,7 @@ class ExtensionController(common.OpenstackController):
raise faults.Fault(webob.exc.HTTPNotFound())
-class ExtensionMiddleware(wsgi.Middleware):
+class ExtensionMiddleware(base_wsgi.Middleware):
"""Extensions middleware for WSGI."""
@classmethod
def factory(cls, global_config, **local_config):
@@ -195,43 +212,43 @@ class ExtensionMiddleware(wsgi.Middleware):
return cls(app, **local_config)
return _factory
- def _action_ext_controllers(self, application, ext_mgr, mapper):
- """Return a dict of ActionExtensionController-s by collection."""
- action_controllers = {}
+ def _action_ext_resources(self, application, ext_mgr, mapper):
+ """Return a dict of ActionExtensionResource-s by collection."""
+ action_resources = {}
for action in ext_mgr.get_actions():
- if not action.collection in action_controllers.keys():
- controller = ActionExtensionController(application)
+ if not action.collection in action_resources.keys():
+ resource = ActionExtensionResource(application)
mapper.connect("/%s/:(id)/action.:(format)" %
action.collection,
action='action',
- controller=controller,
+ controller=resource,
conditions=dict(method=['POST']))
mapper.connect("/%s/:(id)/action" % action.collection,
action='action',
- controller=controller,
+ controller=resource,
conditions=dict(method=['POST']))
- action_controllers[action.collection] = controller
+ action_resources[action.collection] = resource
- return action_controllers
+ return action_resources
- def _request_ext_controllers(self, application, ext_mgr, mapper):
- """Returns a dict of RequestExtensionController-s by collection."""
- request_ext_controllers = {}
+ def _request_ext_resources(self, application, ext_mgr, mapper):
+ """Returns a dict of RequestExtensionResource-s by collection."""
+ request_ext_resources = {}
for req_ext in ext_mgr.get_request_extensions():
- if not req_ext.key in request_ext_controllers.keys():
- controller = RequestExtensionController(application)
+ if not req_ext.key in request_ext_resources.keys():
+ resource = RequestExtensionResource(application)
mapper.connect(req_ext.url_route + '.:(format)',
action='process',
- controller=controller,
+ controller=resource,
conditions=req_ext.conditions)
mapper.connect(req_ext.url_route,
action='process',
- controller=controller,
+ controller=resource,
conditions=req_ext.conditions)
- request_ext_controllers[req_ext.key] = controller
+ request_ext_resources[req_ext.key] = resource
- return request_ext_controllers
+ return request_ext_resources
def __init__(self, application, ext_mgr=None):
@@ -246,22 +263,22 @@ class ExtensionMiddleware(wsgi.Middleware):
LOG.debug(_('Extended resource: %s'),
resource.collection)
mapper.resource(resource.collection, resource.collection,
- controller=resource.controller,
+ controller=wsgi.Resource(resource.controller),
collection=resource.collection_actions,
member=resource.member_actions,
parent_resource=resource.parent)
# extended actions
- action_controllers = self._action_ext_controllers(application, ext_mgr,
+ action_resources = self._action_ext_resources(application, ext_mgr,
mapper)
for action in ext_mgr.get_actions():
LOG.debug(_('Extended action: %s'), action.action_name)
- controller = action_controllers[action.collection]
- controller.add_action(action.action_name, action.handler)
+ resource = action_resources[action.collection]
+ resource.add_action(action.action_name, action.handler)
# extended requests
- req_controllers = self._request_ext_controllers(application, ext_mgr,
- mapper)
+ req_controllers = self._request_ext_resources(application, ext_mgr,
+ mapper)
for request_ext in ext_mgr.get_request_extensions():
LOG.debug(_('Extended request: %s'), request_ext.key)
controller = req_controllers[request_ext.key]
@@ -313,7 +330,7 @@ class ExtensionManager(object):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
- ExtensionController(self)))
+ ExtensionsResource(self)))
for alias, ext in self.extensions.iteritems():
try:
resources.extend(ext.get_resources())
@@ -410,7 +427,7 @@ class ExtensionManager(object):
class RequestExtension(object):
- """Extend requests and responses of core nova OpenStack API controllers.
+ """Extend requests and responses of core nova OpenStack API resources.
Provide a way to add data to responses and handle custom request data
that is sent to core nova OpenStack API controllers.
@@ -424,7 +441,7 @@ class RequestExtension(object):
class ActionExtension(object):
- """Add custom actions to core nova OpenStack API controllers."""
+ """Add custom actions to core nova OpenStack API resources."""
def __init__(self, collection, action_name, handler):
self.collection = collection
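
ActionExtensionResource keeps the earlier dispatch idea: handlers are registered by action name, and the first key in the request body that matches a registered action wins, otherwise the call falls through to the wrapped application. A minimal standalone sketch of that registry (names here are illustrative, including the fox-socks action borrowed from the test extension):

    class ActionDispatcher(object):
        """Registry of action handlers, keyed by the action name in the request body."""
        def __init__(self):
            self.action_handlers = {}

        def add_action(self, action_name, handler):
            self.action_handlers[action_name] = handler

        def dispatch(self, body, req=None, id=None):
            for action_name, handler in self.action_handlers.items():
                if action_name in body:
                    return handler(body, req, id)
            return None  # no handler matched; the real code falls through to the wrapped app

    dispatcher = ActionDispatcher()
    dispatcher.add_action('add_fox_sock', lambda body, req, id: 'socks added to %s' % id)
    print(dispatcher.dispatch({'add_fox_sock': {}}, id='server1'))
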
diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 87118ce19..b9a23c126 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -19,8 +19,7 @@
import webob.dec
import webob.exc
-from nova import wsgi
-from nova.api.openstack import common
+from nova.api.openstack import wsgi
class Fault(webob.exc.HTTPException):
@@ -55,13 +54,21 @@ class Fault(webob.exc.HTTPException):
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
+
# 'code' is an attribute on the fault tag itself
- metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
- default_xmlns = common.XML_NS_V10
- serializer = wsgi.Serializer(metadata, default_xmlns)
+ metadata = {'attributes': {fault_name: 'code'}}
+
content_type = req.best_match_content_type()
- self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
+
+ serializer = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=wsgi.XMLNS_V10),
+ 'application/json': wsgi.JSONDictSerializer(),
+ }[content_type]
+
+ self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
+
return self.wrapped_exc
@@ -70,14 +77,6 @@ class OverLimitFault(webob.exc.HTTPException):
Rate-limited request response.
"""
- _serialization_metadata = {
- "application/xml": {
- "attributes": {
- "overLimitFault": "code",
- },
- },
- }
-
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
@@ -97,8 +96,16 @@ class OverLimitFault(webob.exc.HTTPException):
Return the wrapped exception with a serialized body conforming to our
error format.
"""
- serializer = wsgi.Serializer(self._serialization_metadata)
content_type = request.best_match_content_type()
- content = serializer.serialize(self.content, content_type)
+ metadata = {"attributes": {"overLimitFault": "code"}}
+
+ serializer = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=wsgi.XMLNS_V10),
+ 'application/json': wsgi.JSONDictSerializer(),
+ }[content_type]
+
+ content = serializer.serialize(self.content)
self.wrapped_exc.body = content
+
return self.wrapped_exc
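
Both fault classes now build their serializer inline from the negotiated content type rather than from class-level _serialization_metadata. For reference, roughly the dict shape being serialized; only 'code' and 'retryAfter' are visible in this hunk, and the 'message' field is an assumption:

    import json

    def build_fault_data(fault_name, code, explanation, retry_after=None):
        # Approximate shape of the dict Fault.__call__ serializes; only 'code' and
        # 'retryAfter' appear in the diff above, 'message' is assumed here.
        fault_data = {fault_name: {'code': code, 'message': explanation}}
        if code == 413 and retry_after is not None:
            fault_data[fault_name]['retryAfter'] = retry_after
        return fault_data

    print(json.dumps(build_fault_data('overLimit', 413, 'Too many requests', retry_after=30)))
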
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index 4c5971cf6..a21ff6cb2 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -19,22 +19,13 @@ import webob
from nova import db
from nova import exception
-from nova.api.openstack import common
from nova.api.openstack import views
+from nova.api.openstack import wsgi
-class Controller(common.OpenstackController):
+class Controller(object):
"""Flavor controller for the OpenStack API."""
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "flavor": ["id", "name", "ram", "disk"],
- "link": ["rel", "type", "href"],
- }
- }
- }
-
def index(self, req):
"""Return all flavors in brief."""
items = self._get_flavors(req, is_detail=False)
@@ -71,14 +62,31 @@ class Controller(common.OpenstackController):
class ControllerV10(Controller):
+
def _get_view_builder(self, req):
return views.flavors.ViewBuilder()
class ControllerV11(Controller):
+
def _get_view_builder(self, req):
base_url = req.application_url
return views.flavors.ViewBuilderV11(base_url)
- def get_default_xmlns(self, req):
- return common.XML_NS_V11
+
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': ControllerV10,
+ '1.1': ControllerV11,
+ }[version]()
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns),
+ }
+
+ return wsgi.Resource(controller, serializers=serializers)
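
This create_resource(version) factory is the template repeated through the rest of the change: the controller class and XML namespace are looked up in version-keyed dicts and handed to a Resource. A trimmed-down sketch of the pattern with placeholder classes and namespaces (the real factory also attaches serialization metadata):

    class ControllerV10(object):
        def index(self, req):
            return {'flavors': []}

    class ControllerV11(ControllerV10):
        pass

    def create_resource(version='1.0'):
        # Version-keyed lookups: an unsupported version fails fast with KeyError.
        controller = {
            '1.0': ControllerV10,
            '1.1': ControllerV11,
        }[version]()
        xmlns = {
            '1.0': 'urn:example:api-v1.0',  # placeholder namespaces, not nova's real ones
            '1.1': 'urn:example:api-v1.1',
        }[version]
        # The real factory wraps these in wsgi.Resource with an XMLDictSerializer.
        return controller, xmlns

    print(create_resource('1.1'))
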
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index 1eccc0174..88e10168d 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -20,20 +20,18 @@ from webob import exc
from nova import flags
from nova import quota
from nova import utils
-from nova import wsgi
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
FLAGS = flags.FLAGS
-class Controller(common.OpenstackController):
+class Controller(object):
"""The image metadata API controller for the Openstack API"""
def __init__(self):
self.image_service = utils.import_object(FLAGS.image_service)
- super(Controller, self).__init__()
def _get_metadata(self, context, image_id, image=None):
if not image:
@@ -64,9 +62,8 @@ class Controller(common.OpenstackController):
else:
return faults.Fault(exc.HTTPNotFound())
- def create(self, req, image_id):
+ def create(self, req, image_id, body):
context = req.environ['nova.context']
- body = self._deserialize(req.body, req.get_content_type())
img = self.image_service.show(context, image_id)
metadata = self._get_metadata(context, image_id, img)
if 'metadata' in body:
@@ -77,9 +74,8 @@ class Controller(common.OpenstackController):
self.image_service.update(context, image_id, img, None)
return dict(metadata=metadata)
- def update(self, req, image_id, id):
+ def update(self, req, image_id, id, body):
context = req.environ['nova.context']
- body = self._deserialize(req.body, req.get_content_type())
if not id in body:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
@@ -104,3 +100,11 @@ class Controller(common.OpenstackController):
metadata.pop(id)
img['properties'] = metadata
self.image_service.update(context, image_id, img, None)
+
+
+def create_resource():
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 34d4c27fc..d6107a2d0 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -23,24 +23,17 @@ from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.api.openstack.views import images as images_view
+from nova.api.openstack import wsgi
LOG = log.getLogger('nova.api.openstack.images')
FLAGS = flags.FLAGS
+SUPPORTED_FILTERS = ['name', 'status']
-class Controller(common.OpenstackController):
- """Base `wsgi.Controller` for retrieving/displaying images."""
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "image": ["id", "name", "updated", "created", "status",
- "serverId", "progress"],
- "link": ["rel", "type", "href"],
- },
- },
- }
+class Controller(object):
+ """Base controller for retrieving/displaying images."""
def __init__(self, image_service=None, compute_service=None):
"""Initialize new `ImageController`.
@@ -59,7 +52,8 @@ class Controller(common.OpenstackController):
:param req: `wsgi.Request` object
"""
context = req.environ['nova.context']
- images = self._image_service.index(context)
+ filters = self._get_filters(req)
+ images = self._image_service.index(context, filters)
images = common.limited(images, req)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=False) for image in images])
@@ -70,11 +64,26 @@ class Controller(common.OpenstackController):
:param req: `wsgi.Request` object.
"""
context = req.environ['nova.context']
- images = self._image_service.detail(context)
+ filters = self._get_filters(req)
+ images = self._image_service.detail(context, filters)
images = common.limited(images, req)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
+ def _get_filters(self, req):
+ """
+ Return a dictionary of query param filters from the request
+
+ :param req: the Request object coming from the wsgi layer
+ :retval a dict of key/value filters
+ """
+ filters = {}
+ for param in req.str_params:
+ if param in SUPPORTED_FILTERS or param.startswith('property-'):
+ filters[param] = req.str_params.get(param)
+
+ return filters
+
def show(self, req, id):
"""Return detailed information about a specific image.
@@ -108,21 +117,20 @@ class Controller(common.OpenstackController):
self._image_service.delete(context, image_id)
return webob.exc.HTTPNoContent()
- def create(self, req):
+ def create(self, req, body):
"""Snapshot a server instance and save the image.
:param req: `wsgi.Request` object
"""
context = req.environ['nova.context']
content_type = req.get_content_type()
- image = self._deserialize(req.body, content_type)
- if not image:
+ if not body:
raise webob.exc.HTTPBadRequest()
try:
- server_id = image["image"]["serverId"]
- image_name = image["image"]["name"]
+ server_id = body["image"]["serverId"]
+ image_name = body["image"]["name"]
except KeyError:
raise webob.exc.HTTPBadRequest()
@@ -151,5 +159,29 @@ class ControllerV11(Controller):
base_url = request.application_url
return images_view.ViewBuilderV11(base_url)
- def get_default_xmlns(self, req):
- return common.XML_NS_V11
+
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': ControllerV10,
+ '1.1': ControllerV11,
+ }[version]()
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ metadata = {
+ "attributes": {
+ "image": ["id", "name", "updated", "created", "status",
+ "serverId", "progress"],
+ "link": ["rel", "type", "href"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
+ metadata=metadata),
+ }
+
+ return wsgi.Resource(controller, serializers=serializers)
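
The new _get_filters helper forwards only whitelisted query parameters, plus anything prefixed with 'property-', to the image service. The same idea as a standalone function over a plain dict of query parameters:

    SUPPORTED_FILTERS = ['name', 'status']

    def get_filters(query_params):
        """Keep only supported filters and property-* keys."""
        filters = {}
        for param, value in query_params.items():
            if param in SUPPORTED_FILTERS or param.startswith('property-'):
                filters[param] = value
        return filters

    print(get_filters({'name': 'lucid', 'property-ramdisk_id': '12', 'limit': '5'}))
    # -> {'name': 'lucid', 'property-ramdisk_id': '12'}
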
diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py
index 778e9ba1a..abea71830 100644
--- a/nova/api/openstack/ips.py
+++ b/nova/api/openstack/ips.py
@@ -20,23 +20,14 @@ import time
from webob import exc
import nova
-import nova.api.openstack.views.addresses
-from nova.api.openstack import common
from nova.api.openstack import faults
+import nova.api.openstack.views.addresses
+from nova.api.openstack import wsgi
-class Controller(common.OpenstackController):
+class Controller(object):
"""The servers addresses API controller for the Openstack API."""
- _serialization_metadata = {
- 'application/xml': {
- 'list_collections': {
- 'public': {'item_name': 'ip', 'item_key': 'addr'},
- 'private': {'item_name': 'ip', 'item_key': 'addr'},
- },
- },
- }
-
def __init__(self):
self.compute_api = nova.compute.API()
self.builder = nova.api.openstack.views.addresses.ViewBuilderV10()
@@ -65,8 +56,24 @@ class Controller(common.OpenstackController):
def show(self, req, server_id, id):
return faults.Fault(exc.HTTPNotImplemented())
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
return faults.Fault(exc.HTTPNotImplemented())
def delete(self, req, server_id, id):
return faults.Fault(exc.HTTPNotImplemented())
+
+
+def create_resource():
+ metadata = {
+ 'list_collections': {
+ 'public': {'item_name': 'ip', 'item_key': 'addr'},
+ 'private': {'item_name': 'ip', 'item_key': 'addr'},
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=wsgi.XMLNS_V10),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
index bd0250a7f..4d46b92df 100644
--- a/nova/api/openstack/limits.py
+++ b/nova/api/openstack/limits.py
@@ -31,10 +31,12 @@ from collections import defaultdict
from webob.dec import wsgify
from nova import quota
+from nova import wsgi as base_wsgi
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.api.openstack.views import limits as limits_views
+from nova.api.openstack import wsgi
# Convenience constants for the limits dictionary passed to Limiter().
@@ -44,23 +46,11 @@ PER_HOUR = 60 * 60
PER_DAY = 60 * 60 * 24
-class LimitsController(common.OpenstackController):
+class LimitsController(object):
"""
Controller for accessing limits in the OpenStack API.
"""
- _serialization_metadata = {
- "application/xml": {
- "attributes": {
- "limit": ["verb", "URI", "uri", "regex", "value", "unit",
- "resetTime", "next-available", "remaining", "name"],
- },
- "plurals": {
- "rate": "limit",
- },
- },
- }
-
def index(self, req):
"""
Return all global and rate limit information.
@@ -86,6 +76,35 @@ class LimitsControllerV11(LimitsController):
return limits_views.ViewBuilderV11()
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': LimitsControllerV10,
+ '1.1': LimitsControllerV11,
+ }[version]()
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ metadata = {
+ "attributes": {
+ "limit": ["verb", "URI", "uri", "regex", "value", "unit",
+ "resetTime", "next-available", "remaining", "name"],
+ },
+ "plurals": {
+ "rate": "limit",
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
+ metadata=metadata)
+ }
+
+ return wsgi.Resource(controller, serializers=serializers)
+
+
class Limit(object):
"""
Stores information about a limit for HTTP requets.
@@ -197,7 +216,7 @@ DEFAULT_LIMITS = [
]
-class RateLimitingMiddleware(wsgi.Middleware):
+class RateLimitingMiddleware(base_wsgi.Middleware):
"""
Rate-limits requests passing through this middleware. All limit information
is stored in memory for this implementation.
@@ -211,7 +230,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
@param application: WSGI application to wrap
@param limits: List of dictionaries describing limits
"""
- wsgi.Middleware.__init__(self, application)
+ base_wsgi.Middleware.__init__(self, application)
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@wsgify(RequestClass=wsgi.Request)
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index fd64ee4fb..b38b84a2a 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -19,12 +19,11 @@ from webob import exc
from nova import compute
from nova import quota
-from nova import wsgi
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
-class Controller(common.OpenstackController):
+class Controller(object):
""" The server metadata API controller for the Openstack API """
def __init__(self):
@@ -43,10 +42,9 @@ class Controller(common.OpenstackController):
context = req.environ['nova.context']
return self._get_metadata(context, server_id)
- def create(self, req, server_id):
+ def create(self, req, server_id, body):
context = req.environ['nova.context']
- data = self._deserialize(req.body, req.get_content_type())
- metadata = data.get('metadata')
+ metadata = body.get('metadata')
try:
self.compute_api.update_or_create_instance_metadata(context,
server_id,
@@ -55,9 +53,8 @@ class Controller(common.OpenstackController):
self._handle_quota_error(error)
return req.body
- def update(self, req, server_id, id):
+ def update(self, req, server_id, id, body):
context = req.environ['nova.context']
- body = self._deserialize(req.body, req.get_content_type())
if not id in body:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
@@ -92,3 +89,11 @@ class Controller(common.OpenstackController):
if error.code == "MetadataLimitExceeded":
raise exc.HTTPBadRequest(explanation=error.message)
raise error
+
+
+def create_resource():
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 1ec74bc2e..0f7dde389 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -31,6 +31,7 @@ import nova.api.openstack.views.addresses
import nova.api.openstack.views.flavors
import nova.api.openstack.views.images
import nova.api.openstack.views.servers
+from nova.api.openstack import wsgi
from nova.auth import manager as auth_manager
from nova.compute import instance_types
import nova.api.openstack
@@ -41,31 +42,12 @@ LOG = logging.getLogger('nova.api.openstack.servers')
FLAGS = flags.FLAGS
-class Controller(common.OpenstackController):
+class Controller(object):
""" The Server API controller for the OpenStack API """
- _serialization_metadata = {
- "application/xml": {
- "attributes": {
- "server": ["id", "imageId", "name", "flavorId", "hostId",
- "status", "progress", "adminPass", "flavorRef",
- "imageRef"],
- "link": ["rel", "type", "href"],
- },
- "dict_collections": {
- "metadata": {"item_name": "meta", "item_key": "key"},
- },
- "list_collections": {
- "public": {"item_name": "ip", "item_key": "addr"},
- "private": {"item_name": "ip", "item_key": "addr"},
- },
- },
- }
-
def __init__(self):
self.compute_api = compute.API()
self._image_service = utils.import_object(FLAGS.image_service)
- super(Controller, self).__init__()
def index(self, req):
""" Returns a list of server names and ids for a given user """
@@ -122,15 +104,14 @@ class Controller(common.OpenstackController):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def create(self, req):
+ def create(self, req, body):
""" Creates a new server for a given user """
- env = self._deserialize_create(req)
- if not env:
+ if not body:
return faults.Fault(exc.HTTPUnprocessableEntity())
context = req.environ['nova.context']
- password = self._get_server_admin_password(env['server'])
+ password = self._get_server_admin_password(body['server'])
key_name = None
key_data = None
@@ -140,7 +121,7 @@ class Controller(common.OpenstackController):
key_name = key_pair['name']
key_data = key_pair['public_key']
- requested_image_id = self._image_id_from_req_data(env)
+ requested_image_id = self._image_id_from_req_data(body)
try:
image_id = common.get_image_id_from_image_hash(self._image_service,
context, requested_image_id)
@@ -151,18 +132,18 @@ class Controller(common.OpenstackController):
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
req, image_id)
- personality = env['server'].get('personality')
+ personality = body['server'].get('personality')
injected_files = []
if personality:
injected_files = self._get_injected_files(personality)
- flavor_id = self._flavor_id_from_req_data(env)
+ flavor_id = self._flavor_id_from_req_data(body)
- if not 'name' in env['server']:
+ if not 'name' in body['server']:
msg = _("Server name is not defined")
return exc.HTTPBadRequest(msg)
- name = env['server']['name']
+ name = body['server']['name']
self._validate_server_name(name)
name = name.strip()
@@ -179,7 +160,7 @@ class Controller(common.OpenstackController):
display_description=name,
key_name=key_name,
key_data=key_data,
- metadata=env['server'].get('metadata', {}),
+ metadata=body['server'].get('metadata', {}),
injected_files=injected_files,
admin_password=password)
except quota.QuotaError as error:
@@ -193,18 +174,6 @@ class Controller(common.OpenstackController):
server['server']['adminPass'] = password
return server
- def _deserialize_create(self, request):
- """
- Deserialize a create request
-
- Overrides normal behavior in the case of xml content
- """
- if request.content_type == "application/xml":
- deserializer = ServerCreateRequestXMLDeserializer()
- return deserializer.deserialize(request.body)
- else:
- return self._deserialize(request.body, request.get_content_type())
-
def _get_injected_files(self, personality):
"""
Create a list of injected files from the personality attribute
@@ -254,24 +223,23 @@ class Controller(common.OpenstackController):
return utils.generate_password(16)
@scheduler_api.redirect_handler
- def update(self, req, id):
+ def update(self, req, id, body):
""" Updates the server name or password """
if len(req.body) == 0:
raise exc.HTTPUnprocessableEntity()
- inst_dict = self._deserialize(req.body, req.get_content_type())
- if not inst_dict:
+ if not body:
return faults.Fault(exc.HTTPUnprocessableEntity())
ctxt = req.environ['nova.context']
update_dict = {}
- if 'name' in inst_dict['server']:
- name = inst_dict['server']['name']
+ if 'name' in body['server']:
+ name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
- self._parse_update(ctxt, id, inst_dict, update_dict)
+ self._parse_update(ctxt, id, body, update_dict)
try:
self.compute_api.update(ctxt, id, **update_dict)
@@ -293,7 +261,7 @@ class Controller(common.OpenstackController):
pass
@scheduler_api.redirect_handler
- def action(self, req, id):
+ def action(self, req, id, body):
"""Multi-purpose method used to reboot, rebuild, or
resize a server"""
@@ -306,10 +274,9 @@ class Controller(common.OpenstackController):
'rebuild': self._action_rebuild,
}
- input_dict = self._deserialize(req.body, req.get_content_type())
for key in actions.keys():
- if key in input_dict:
- return actions[key](input_dict, req, id)
+ if key in body:
+ return actions[key](body, req, id)
return faults.Fault(exc.HTTPNotImplemented())
def _action_change_password(self, input_dict, req, id):
@@ -397,7 +364,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def reset_network(self, req, id):
+ def reset_network(self, req, id, body):
"""
Reset networking on an instance (admin only).
@@ -412,7 +379,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def inject_network_info(self, req, id):
+ def inject_network_info(self, req, id, body):
"""
Inject network info for an instance (admin only).
@@ -427,7 +394,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def pause(self, req, id):
+ def pause(self, req, id, body):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
try:
@@ -439,7 +406,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def unpause(self, req, id):
+ def unpause(self, req, id, body):
""" Permit Admins to Unpause the server. """
ctxt = req.environ['nova.context']
try:
@@ -451,7 +418,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def suspend(self, req, id):
+ def suspend(self, req, id, body):
"""permit admins to suspend the server"""
context = req.environ['nova.context']
try:
@@ -463,7 +430,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
@scheduler_api.redirect_handler
- def resume(self, req, id):
+ def resume(self, req, id, body):
"""permit admins to resume the server from suspend"""
context = req.environ['nova.context']
try:
@@ -727,14 +694,16 @@ class ControllerV11(Controller):
image_id = common.get_id_from_href(image_ref)
personalities = info["rebuild"].get("personality", [])
- metadata = info["rebuild"].get("metadata", {})
+ metadata = info["rebuild"].get("metadata")
+ name = info["rebuild"].get("name")
- self._validate_metadata(metadata)
+ if metadata:
+ self._validate_metadata(metadata)
self._decode_personalities(personalities)
try:
- self.compute_api.rebuild(context, instance_id, image_id, metadata,
- personalities)
+ self.compute_api.rebuild(context, instance_id, image_id, name,
+ metadata, personalities)
except exception.BuildInProgress:
msg = _("Instance %d is currently being rebuilt.") % instance_id
LOG.debug(msg)
@@ -754,11 +723,8 @@ class ControllerV11(Controller):
raise exc.HTTPBadRequest(msg)
return password
- def get_default_xmlns(self, req):
- return common.XML_NS_V11
-
-class ServerCreateRequestXMLDeserializer(object):
+class ServerXMLDeserializer(wsgi.XMLDeserializer):
"""
Deserializer to handle xml-formatted server create requests.
@@ -766,7 +732,7 @@ class ServerCreateRequestXMLDeserializer(object):
and personality attributes
"""
- def deserialize(self, string):
+ def create(self, string):
"""Deserialize an xml-formatted server create request"""
dom = minidom.parseString(string)
server = self._extract_server(dom)
@@ -833,3 +799,43 @@ class ServerCreateRequestXMLDeserializer(object):
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
+
+
+def create_resource(version='1.0'):
+ controller = {
+ '1.0': ControllerV10,
+ '1.1': ControllerV11,
+ }[version]()
+
+ metadata = {
+ "attributes": {
+ "server": ["id", "imageId", "name", "flavorId", "hostId",
+ "status", "progress", "adminPass", "flavorRef",
+ "imageRef"],
+ "link": ["rel", "type", "href"],
+ },
+ "dict_collections": {
+ "metadata": {"item_name": "meta", "item_key": "key"},
+ },
+ "list_collections": {
+ "public": {"item_name": "ip", "item_key": "addr"},
+ "private": {"item_name": "ip", "item_key": "addr"},
+ },
+ }
+
+ xmlns = {
+ '1.0': wsgi.XMLNS_V10,
+ '1.1': wsgi.XMLNS_V11,
+ }[version]
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
+ xmlns=xmlns),
+ }
+
+ deserializers = {
+ 'application/xml': ServerXMLDeserializer(),
+ }
+
+ return wsgi.Resource(controller, serializers=serializers,
+ deserializers=deserializers)
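
ServerXMLDeserializer still uses minidom to turn an XML server-create request into the same dict shape the JSON path produces. A compact sketch of that extraction for the top-level attributes and metadata only (the real deserializer also handles personality files):

    from xml.dom import minidom

    def deserialize_server_create(xml_string):
        dom = minidom.parseString(xml_string)
        node = dom.getElementsByTagName('server')[0]
        server = {}
        # Top-level attributes such as name/imageId/flavorId become dict keys.
        for i in range(node.attributes.length):
            attr = node.attributes.item(i)
            server[attr.name] = attr.value
        # <metadata><meta key="...">value</meta></metadata> becomes a plain dict.
        metadata = {}
        for meta in node.getElementsByTagName('meta'):
            text = ''.join(child.nodeValue for child in meta.childNodes
                           if child.nodeType == child.TEXT_NODE)
            metadata[meta.getAttribute('key')] = text
        if metadata:
            server['metadata'] = metadata
        return {'server': server}

    print(deserialize_server_create(
        '<server name="web1" imageId="3" flavorId="1">'
        '<metadata><meta key="team">ops</meta></metadata></server>'))
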
diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py
index 996db3648..4f11f8dfb 100644
--- a/nova/api/openstack/shared_ip_groups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -17,29 +17,13 @@
from webob import exc
-from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
-def _translate_keys(inst):
- """ Coerces a shared IP group instance into proper dictionary format """
- return dict(sharedIpGroup=inst)
-
-
-def _translate_detail_keys(inst):
- """ Coerces a shared IP group instance into proper dictionary format with
- correctly mapped attributes """
- return dict(sharedIpGroups=inst)
-
-
-class Controller(common.OpenstackController):
+class Controller(object):
""" The Shared IP Groups Controller for the Openstack API """
- _serialization_metadata = {
- 'application/xml': {
- 'attributes': {
- 'sharedIpGroup': []}}}
-
def index(self, req):
""" Returns a list of Shared IP Groups for the user """
raise faults.Fault(exc.HTTPNotImplemented())
@@ -48,7 +32,7 @@ class Controller(common.OpenstackController):
""" Shows in-depth information on a specific Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
- def update(self, req, id):
+ def update(self, req, id, body):
""" You can't update a Shared IP Group """
raise faults.Fault(exc.HTTPNotImplemented())
@@ -60,6 +44,10 @@ class Controller(common.OpenstackController):
""" Returns a complete list of Shared IP Groups """
raise faults.Fault(exc.HTTPNotImplemented())
- def create(self, req):
+ def create(self, req, body):
""" Creates a new Shared IP group """
raise faults.Fault(exc.HTTPNotImplemented())
+
+
+def create_resource():
+ return wsgi.Resource(Controller())
diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py
index 7ae4c3232..50975fc1f 100644
--- a/nova/api/openstack/users.py
+++ b/nova/api/openstack/users.py
@@ -20,8 +20,10 @@ from nova import flags
from nova import log as logging
from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import wsgi
from nova.auth import manager
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.openstack')
@@ -34,12 +36,7 @@ def _translate_keys(user):
admin=user.admin)
-class Controller(common.OpenstackController):
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "user": ["id", "name", "access", "secret", "admin"]}}}
+class Controller(object):
def __init__(self):
self.manager = manager.AuthManager()
@@ -81,23 +78,35 @@ class Controller(common.OpenstackController):
self.manager.delete_user(id)
return {}
- def create(self, req):
+ def create(self, req, body):
self._check_admin(req.environ['nova.context'])
- env = self._deserialize(req.body, req.get_content_type())
- is_admin = env['user'].get('admin') in ('T', 'True', True)
- name = env['user'].get('name')
- access = env['user'].get('access')
- secret = env['user'].get('secret')
+ is_admin = body['user'].get('admin') in ('T', 'True', True)
+ name = body['user'].get('name')
+ access = body['user'].get('access')
+ secret = body['user'].get('secret')
user = self.manager.create_user(name, access, secret, is_admin)
return dict(user=_translate_keys(user))
- def update(self, req, id):
+ def update(self, req, id, body):
self._check_admin(req.environ['nova.context'])
- env = self._deserialize(req.body, req.get_content_type())
- is_admin = env['user'].get('admin')
+ is_admin = body['user'].get('admin')
if is_admin is not None:
is_admin = is_admin in ('T', 'True', True)
- access = env['user'].get('access')
- secret = env['user'].get('secret')
+ access = body['user'].get('access')
+ secret = body['user'].get('secret')
self.manager.modify_user(id, access, secret, is_admin)
return dict(user=_translate_keys(self.manager.get_user(id)))
+
+
+def create_resource():
+ metadata = {
+ "attributes": {
+ "user": ["id", "name", "access", "secret", "admin"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py
index 3f9d91934..9db160102 100644
--- a/nova/api/openstack/versions.py
+++ b/nova/api/openstack/versions.py
@@ -18,13 +18,26 @@
import webob
import webob.dec
-from nova import wsgi
import nova.api.openstack.views.versions
+from nova.api.openstack import wsgi
-class Versions(wsgi.Application):
- @webob.dec.wsgify(RequestClass=wsgi.Request)
- def __call__(self, req):
+class Versions(wsgi.Resource):
+ def __init__(self):
+ metadata = {
+ "attributes": {
+ "version": ["status", "id"],
+ "link": ["rel", "href"],
+ }
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(metadata=metadata),
+ }
+
+ super(Versions, self).__init__(None, serializers=serializers)
+
+ def dispatch(self, request, *args):
"""Respond to a request for all OpenStack API versions."""
version_objs = [
{
@@ -37,24 +50,6 @@ class Versions(wsgi.Application):
},
]
- builder = nova.api.openstack.views.versions.get_view_builder(req)
+ builder = nova.api.openstack.views.versions.get_view_builder(request)
versions = [builder.build(version) for version in version_objs]
- response = dict(versions=versions)
-
- metadata = {
- "application/xml": {
- "attributes": {
- "version": ["status", "id"],
- "link": ["rel", "href"],
- }
- }
- }
-
- content_type = req.best_match_content_type()
- body = wsgi.Serializer(metadata).serialize(response, content_type)
-
- response = webob.Response()
- response.content_type = content_type
- response.body = body
-
- return response
+ return dict(versions=versions)
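
Since Versions is now just a wsgi.Resource subclass that returns a dict, it can be exercised like any other resource; a sketch, assuming the usual Nova environment is importable:

    from nova.api.openstack import versions
    from nova.api.openstack import wsgi

    req = wsgi.Request.blank('/')
    resp = req.get_response(versions.Versions())
    print resp.body   # JSON (the default content type) listing the API versions
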
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
new file mode 100644
index 000000000..ddf4e6fa9
--- /dev/null
+++ b/nova/api/openstack/wsgi.py
@@ -0,0 +1,380 @@
+
+import json
+import webob
+from xml.dom import minidom
+
+from nova import exception
+from nova import log as logging
+from nova import utils
+from nova import wsgi
+
+
+XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
+XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
+
+LOG = logging.getLogger('nova.api.openstack.wsgi')
+
+
+class Request(webob.Request):
+ """Add some Openstack API-specific logic to the base webob.Request."""
+
+ def best_match_content_type(self):
+ """Determine the requested response content-type.
+
+ Based on the query extension then the Accept header.
+
+ """
+ supported = ('application/json', 'application/xml')
+
+ parts = self.path.rsplit('.', 1)
+ if len(parts) > 1:
+ ctype = 'application/{0}'.format(parts[1])
+ if ctype in supported:
+ return ctype
+
+ bm = self.accept.best_match(supported)
+
+ # default to application/json if we don't find a preference
+ return bm or 'application/json'
+
+ def get_content_type(self):
+ """Determine content type of the request body.
+
+ Does not do any body introspection, only checks header
+
+ """
+ if not "Content-Type" in self.headers:
+ raise exception.InvalidContentType(content_type=None)
+
+ allowed_types = ("application/xml", "application/json")
+ content_type = self.content_type
+
+ if content_type not in allowed_types:
+ raise exception.InvalidContentType(content_type=content_type)
+ else:
+ return content_type
+
+
+class TextDeserializer(object):
+ """Custom request body deserialization based on controller action name."""
+
+ def deserialize(self, datastring, action='default'):
+ """Find local deserialization method and parse request body."""
+ action_method = getattr(self, action, self.default)
+ return action_method(datastring)
+
+ def default(self, datastring):
+ """Default deserialization code should live here"""
+ raise NotImplementedError()
+
+
+class JSONDeserializer(TextDeserializer):
+
+ def default(self, datastring):
+ return utils.loads(datastring)
+
+
+class XMLDeserializer(TextDeserializer):
+
+ def __init__(self, metadata=None):
+ """
+ :param metadata: information needed to deserialize xml into
+ a dictionary.
+ """
+ super(XMLDeserializer, self).__init__()
+ self.metadata = metadata or {}
+
+ def default(self, datastring):
+ plurals = set(self.metadata.get('plurals', {}))
+ node = minidom.parseString(datastring).childNodes[0]
+ return {node.nodeName: self._from_xml_node(node, plurals)}
+
+ def _from_xml_node(self, node, listnames):
+ """Convert a minidom node to a simple Python type.
+
+ :param listnames: list of XML node names whose subnodes should
+ be considered list items.
+
+ """
+ if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
+ return node.childNodes[0].nodeValue
+ elif node.nodeName in listnames:
+ return [self._from_xml_node(n, listnames) for n in node.childNodes]
+ else:
+ result = dict()
+ for attr in node.attributes.keys():
+ result[attr] = node.attributes[attr].nodeValue
+ for child in node.childNodes:
+ if child.nodeType != node.TEXT_NODE:
+ result[child.nodeName] = self._from_xml_node(child,
+ listnames)
+ return result
+
+
+class RequestDeserializer(object):
+ """Break up a Request object into more useful pieces."""
+
+ def __init__(self, deserializers=None):
+ """
+ :param deserializers: dictionary of content-type-specific deserializers
+
+ """
+ self.deserializers = {
+ 'application/xml': XMLDeserializer(),
+ 'application/json': JSONDeserializer(),
+ }
+
+ self.deserializers.update(deserializers or {})
+
+ def deserialize(self, request):
+ """Extract necessary pieces of the request.
+
+ :param request: Request object
+ :returns: tuple of expected controller action name, dictionary of
+ keyword arguments to pass to the controller, the expected
+ content type of the response
+
+ """
+ action_args = self.get_action_args(request.environ)
+ action = action_args.pop('action', None)
+
+ if request.method.lower() in ('post', 'put'):
+ if len(request.body) == 0:
+ action_args['body'] = None
+ else:
+ content_type = request.get_content_type()
+ deserializer = self.get_deserializer(content_type)
+
+ try:
+ body = deserializer.deserialize(request.body, action)
+ action_args['body'] = body
+ except exception.InvalidContentType:
+ action_args['body'] = None
+
+ accept = self.get_expected_content_type(request)
+
+ return (action, action_args, accept)
+
+ def get_deserializer(self, content_type):
+ try:
+ return self.deserializers[content_type]
+ except (KeyError, TypeError):
+ raise exception.InvalidContentType(content_type=content_type)
+
+ def get_expected_content_type(self, request):
+ return request.best_match_content_type()
+
+ def get_action_args(self, request_environment):
+ """Parse dictionary created by routes library."""
+ try:
+ args = request_environment['wsgiorg.routing_args'][1].copy()
+ except Exception:
+ return {}
+
+ try:
+ del args['controller']
+ except KeyError:
+ pass
+
+ try:
+ del args['format']
+ except KeyError:
+ pass
+
+ return args
+
+
+class DictSerializer(object):
+ """Custom response body serialization based on controller action name."""
+
+ def serialize(self, data, action='default'):
+ """Find local serialization method and encode response body."""
+ action_method = getattr(self, action, self.default)
+ return action_method(data)
+
+ def default(self, data):
+ """Default serialization code should live here"""
+ raise NotImplementedError()
+
+
+class JSONDictSerializer(DictSerializer):
+
+ def default(self, data):
+ return utils.dumps(data)
+
+
+class XMLDictSerializer(DictSerializer):
+
+ def __init__(self, metadata=None, xmlns=None):
+ """
+ :param metadata: information needed to serialize a dictionary into
+ xml.
+ :param xmlns: XML namespace to include with serialized xml
+ """
+ super(XMLDictSerializer, self).__init__()
+ self.metadata = metadata or {}
+ self.xmlns = xmlns
+
+ def default(self, data):
+ # We expect data to contain a single key which is the XML root.
+ root_key = data.keys()[0]
+ doc = minidom.Document()
+ node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
+
+ xmlns = node.getAttribute('xmlns')
+ if not xmlns and self.xmlns:
+ node.setAttribute('xmlns', self.xmlns)
+
+ return node.toprettyxml(indent=' ')
+
+ def _to_xml_node(self, doc, metadata, nodename, data):
+ """Recursive method to convert data members to XML nodes."""
+ result = doc.createElement(nodename)
+
+ # Set the xml namespace if one is specified
+ # TODO(justinsb): We could also use prefixes on the keys
+ xmlns = metadata.get('xmlns', None)
+ if xmlns:
+ result.setAttribute('xmlns', xmlns)
+
+ #TODO(bcwaldon): accomplish this without a type-check
+ if type(data) is list:
+ collections = metadata.get('list_collections', {})
+ if nodename in collections:
+ metadata = collections[nodename]
+ for item in data:
+ node = doc.createElement(metadata['item_name'])
+ node.setAttribute(metadata['item_key'], str(item))
+ result.appendChild(node)
+ return result
+ singular = metadata.get('plurals', {}).get(nodename, None)
+ if singular is None:
+ if nodename.endswith('s'):
+ singular = nodename[:-1]
+ else:
+ singular = 'item'
+ for item in data:
+ node = self._to_xml_node(doc, metadata, singular, item)
+ result.appendChild(node)
+ #TODO(bcwaldon): accomplish this without a type-check
+ elif type(data) is dict:
+ collections = metadata.get('dict_collections', {})
+ if nodename in collections:
+ metadata = collections[nodename]
+ for k, v in data.items():
+ node = doc.createElement(metadata['item_name'])
+ node.setAttribute(metadata['item_key'], str(k))
+ text = doc.createTextNode(str(v))
+ node.appendChild(text)
+ result.appendChild(node)
+ return result
+ attrs = metadata.get('attributes', {}).get(nodename, {})
+ for k, v in data.items():
+ if k in attrs:
+ result.setAttribute(k, str(v))
+ else:
+ node = self._to_xml_node(doc, metadata, k, v)
+ result.appendChild(node)
+ else:
+ # Type is atom
+ node = doc.createTextNode(str(data))
+ result.appendChild(node)
+ return result
+
+
+class ResponseSerializer(object):
+ """Encode the necessary pieces into a response object"""
+
+ def __init__(self, serializers=None):
+ """
+ :param serializers: dictionary of content-type-specific serializers
+
+ """
+ self.serializers = {
+ 'application/xml': XMLDictSerializer(),
+ 'application/json': JSONDictSerializer(),
+ }
+ self.serializers.update(serializers or {})
+
+ def serialize(self, response_data, content_type):
+ """Serialize a dict into a string and wrap in a wsgi.Request object.
+
+ :param response_data: dict produced by the Controller
+ :param content_type: expected mimetype of serialized response body
+
+ """
+ response = webob.Response()
+ response.headers['Content-Type'] = content_type
+
+ serializer = self.get_serializer(content_type)
+ response.body = serializer.serialize(response_data)
+
+ return response
+
+ def get_serializer(self, content_type):
+ try:
+ return self.serializers[content_type]
+ except (KeyError, TypeError):
+ raise exception.InvalidContentType(content_type=content_type)
+
+
+class Resource(wsgi.Application):
+ """WSGI app that handles (de)serialization and controller dispatch.
+
+ WSGI app that reads routing information supplied by RoutesMiddleware
+ and calls the requested action method upon its controller. All
+ controller action methods must accept a 'req' argument, which is the
+ incoming wsgi.Request. If the operation is a PUT or POST, the controller
+ method must also accept a 'body' argument (the deserialized request body).
+ They may raise a webob.exc exception or return a dict, which will be
+ serialized according to the requested content type.
+
+ """
+ def __init__(self, controller, serializers=None, deserializers=None):
+ """
+ :param controller: object that implements methods created by routes lib
+ :param serializers: dict of content-type specific text serializers
+ :param deserializers: dict of content-type specific text deserializers
+
+ """
+ self.controller = controller
+ self.serializer = ResponseSerializer(serializers)
+ self.deserializer = RequestDeserializer(deserializers)
+
+ @webob.dec.wsgify(RequestClass=Request)
+ def __call__(self, request):
+ """WSGI method that controls (de)serialization and method dispatch."""
+
+ LOG.debug("%(method)s %(url)s" % {"method": request.method,
+ "url": request.url})
+
+ try:
+ action, action_args, accept = self.deserializer.deserialize(
+ request)
+ except exception.InvalidContentType:
+ return webob.exc.HTTPBadRequest(_("Unsupported Content-Type"))
+
+ action_result = self.dispatch(request, action, action_args)
+
+ #TODO(bcwaldon): find a more elegant way to pass through non-dict types
+ if type(action_result) is dict:
+ response = self.serializer.serialize(action_result, accept)
+ else:
+ response = action_result
+
+ try:
+ msg_dict = dict(url=request.url, status=response.status_int)
+ msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
+ except AttributeError:
+ msg_dict = dict(url=request.url)
+ msg = _("%(url)s returned a fault")
+
+ LOG.debug(msg)
+
+ return response
+
+ def dispatch(self, request, action, action_args):
+ """Find action-spefic method on controller and call it."""
+
+ controller_method = getattr(self.controller, action)
+ return controller_method(req=request, **action_args)
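
Pulling the pieces of this new module together, a short sketch of the full round trip through Resource (EchoController and the request values are invented, not part of Nova): RequestDeserializer turns the JSON body into the 'body' keyword argument, dispatch() calls the matching action, and ResponseSerializer encodes the returned dict.

    from nova.api.openstack import wsgi

    class EchoController(object):
        """Toy controller; any object with matching action methods works."""
        def create(self, req, body):
            # 'body' arrives already deserialized by RequestDeserializer
            return {'echo': body['message']}

    resource = wsgi.Resource(EchoController())

    # Build the request the way RoutesMiddleware would hand it to us.
    req = wsgi.Request.blank('/echo')
    req.method = 'POST'
    req.content_type = 'application/json'
    req.body = '{"message": "hi"}'
    req.environ['wsgiorg.routing_args'] = (None, {'action': 'create'})

    resp = req.get_response(resource)
    print resp.body   # the dict serialized by JSONDictSerializer: {"echo": "hi"}
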
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
index af73d8f6d..8061b3b67 100644
--- a/nova/api/openstack/zones.py
+++ b/nova/api/openstack/zones.py
@@ -22,6 +22,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova.api.openstack import common
+from nova.api.openstack import wsgi
from nova.scheduler import api
@@ -52,12 +53,7 @@ def _scrub_zone(zone):
'deleted', 'deleted_at', 'updated_at'))
-class Controller(common.OpenstackController):
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "zone": ["id", "api_url", "name", "capabilities"]}}}
+class Controller(object):
def index(self, req):
"""Return all zones in brief"""
@@ -96,17 +92,15 @@ class Controller(common.OpenstackController):
api.zone_delete(req.environ['nova.context'], zone_id)
return {}
- def create(self, req):
+ def create(self, req, body):
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
- zone = api.zone_create(context, env["zone"])
+ zone = api.zone_create(context, body["zone"])
return dict(zone=_scrub_zone(zone))
- def update(self, req, id):
+ def update(self, req, id, body):
context = req.environ['nova.context']
- env = self._deserialize(req.body, req.get_content_type())
zone_id = int(id)
- zone = api.zone_update(context, zone_id, env["zone"])
+ zone = api.zone_update(context, zone_id, body["zone"])
return dict(zone=_scrub_zone(zone))
def select(self, req):
@@ -140,3 +134,18 @@ class Controller(common.OpenstackController):
cooked.append(dict(weight=entry['weight'],
blob=cipher_text))
return cooked
+
+
+def create_resource():
+ metadata = {
+ "attributes": {
+ "zone": ["id", "api_url", "name", "capabilities"],
+ },
+ }
+
+ serializers = {
+ 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10,
+ metadata=metadata),
+ }
+
+ return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index cda2ecc28..8170fcafe 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -1,4 +1,4 @@
-NOVA_KEY_DIR=$(pushd $(dirname $BASH_SOURCE)>/dev/null; pwd; popd>/dev/null)
+NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE}))
export EC2_ACCESS_KEY="%(access)s:%(project)s"
export EC2_SECRET_KEY="%(secret)s"
export EC2_URL="%(ec2)s"
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 4f2363387..263e44bab 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -91,7 +91,6 @@ class API(base.Base):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
-
"""
if injected_files is None:
return
@@ -140,7 +139,6 @@ class API(base.Base):
"""Create the number and type of instances requested.
Verifies that quota and other arguments are valid.
-
"""
if not instance_type:
instance_type = instance_types.get_default_instance_type()
@@ -268,10 +266,17 @@ class API(base.Base):
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
- "instance_type": instance_type,
+ "request_spec": {
+ 'instance_type': instance_type,
+ 'filter':
+ 'nova.scheduler.host_filter.'
+ 'InstanceTypeFilter',
+ },
"availability_zone": availability_zone,
"injected_files": injected_files,
- "admin_password": admin_password}})
+ "admin_password": admin_password,
+ },
+ })
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
@@ -294,7 +299,6 @@ class API(base.Base):
already exist.
:param context: the security context
-
"""
try:
db.security_group_get_by_name(context, context.project_id,
@@ -327,7 +331,6 @@ class API(base.Base):
Sends an update request to each compute node for whom this is
relevant.
-
"""
# First, we get the security group rules that reference this group as
# the grantee..
@@ -374,7 +377,6 @@ class API(base.Base):
updated
:returns: None
-
"""
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
@@ -424,7 +426,6 @@ class API(base.Base):
Use this method instead of get() if this is the only operation you
intend to do. It will route to novaclient.get if the instance is not
found.
-
"""
return self.get(context, instance_id)
@@ -434,7 +435,6 @@ class API(base.Base):
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
-
"""
if reservation_id is not None:
return self.db.instance_get_all_by_reservation(
@@ -464,7 +464,6 @@ class API(base.Base):
compute worker
:returns: None
-
"""
if not params:
params = {}
@@ -514,7 +513,6 @@ class API(base.Base):
"""Snapshot the given instance.
:returns: A dict containing image metadata
-
"""
properties = {'instance_id': str(instance_id),
'user_id': str(context.user_id)}
@@ -530,7 +528,7 @@ class API(base.Base):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
- def rebuild(self, context, instance_id, image_id, metadata=None,
+ def rebuild(self, context, instance_id, image_id, name=None, metadata=None,
files_to_inject=None):
"""Rebuild the given instance with the provided metadata."""
instance = db.api.instance_get(context, instance_id)
@@ -539,13 +537,16 @@ class API(base.Base):
msg = _("Instance already building")
raise exception.BuildInProgress(msg)
- metadata = metadata or {}
- self._check_metadata_properties_quota(context, metadata)
-
files_to_inject = files_to_inject or []
self._check_injected_file_quota(context, files_to_inject)
- self.db.instance_update(context, instance_id, {"metadata": metadata})
+ values = {}
+ if metadata is not None:
+ self._check_metadata_properties_quota(context, metadata)
+ values['metadata'] = metadata
+ if name is not None:
+ values['display_name'] = name
+ self.db.instance_update(context, instance_id, values)
rebuild_params = {
"image_id": image_id,
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d1e01f275..3897b3a9e 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -331,7 +331,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
- def rebuild_instance(self, context, instance_id, image_id):
+ def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -349,7 +349,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id, power_state.BUILDING)
self.driver.destroy(instance_ref)
- instance_ref.image_id = image_id
+ instance_ref.image_id = kwargs.get('image_id')
+ instance_ref.injected_files = kwargs.get('injected_files', [])
self.driver.spawn(instance_ref)
self._update_image_id(context, instance_id, image_id)
diff --git a/nova/db/api.py b/nova/db/api.py
index 310c0bb09..4e0aa60a2 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -47,6 +47,8 @@ flags.DEFINE_string('instance_name_template', 'instance-%08x',
'Template string to be used to generate instance names')
flags.DEFINE_string('volume_name_template', 'volume-%08x',
'Template string to be used to generate instance names')
+flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x',
+ 'Template string to be used to generate snapshot names')
IMPL = utils.LazyPluggable(FLAGS['db_backend'],
@@ -881,6 +883,43 @@ def volume_update(context, volume_id, values):
####################
+def snapshot_create(context, values):
+ """Create a snapshot from the values dictionary."""
+ return IMPL.snapshot_create(context, values)
+
+
+def snapshot_destroy(context, snapshot_id):
+ """Destroy the snapshot or raise if it does not exist."""
+ return IMPL.snapshot_destroy(context, snapshot_id)
+
+
+def snapshot_get(context, snapshot_id):
+ """Get a snapshot or raise if it does not exist."""
+ return IMPL.snapshot_get(context, snapshot_id)
+
+
+def snapshot_get_all(context):
+ """Get all snapshots."""
+ return IMPL.snapshot_get_all(context)
+
+
+def snapshot_get_all_by_project(context, project_id):
+ """Get all snapshots belonging to a project."""
+ return IMPL.snapshot_get_all_by_project(context, project_id)
+
+
+def snapshot_update(context, snapshot_id, values):
+ """Set the given properties on an snapshot and update it.
+
+ Raises NotFound if snapshot does not exist.
+
+ """
+ return IMPL.snapshot_update(context, snapshot_id, values)
+
+
+####################
+
+
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e4dda5c12..c3a971a82 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -771,6 +771,15 @@ def fixed_ip_update(context, address, values):
###################
+def _metadata_refs(metadata_dict):
+ metadata_refs = []
+ if metadata_dict:
+ for k, v in metadata_dict.iteritems():
+ metadata_ref = models.InstanceMetadata()
+ metadata_ref['key'] = k
+ metadata_ref['value'] = v
+ metadata_refs.append(metadata_ref)
+ return metadata_refs
@require_context
@@ -780,15 +789,7 @@ def instance_create(context, values):
context - request context object
values - dict containing column values.
"""
- metadata = values.get('metadata')
- metadata_refs = []
- if metadata:
- for k, v in metadata.iteritems():
- metadata_ref = models.InstanceMetadata()
- metadata_ref['key'] = k
- metadata_ref['value'] = v
- metadata_refs.append(metadata_ref)
- values['metadata'] = metadata_refs
+ values['metadata'] = _metadata_refs(values.get('metadata'))
instance_ref = models.Instance()
instance_ref.update(values)
@@ -1010,6 +1011,11 @@ def instance_set_state(context, instance_id, state, description=None):
@require_context
def instance_update(context, instance_id, values):
session = get_session()
+ metadata = values.get('metadata')
+ if metadata is not None:
+ instance_metadata_delete_all(context, instance_id)
+ instance_metadata_update_or_create(context, instance_id,
+ values.pop('metadata'))
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
instance_ref.update(values)
@@ -1790,6 +1796,82 @@ def volume_update(context, volume_id, values):
@require_context
+def snapshot_create(context, values):
+ snapshot_ref = models.Snapshot()
+ snapshot_ref.update(values)
+
+ session = get_session()
+ with session.begin():
+ snapshot_ref.save(session=session)
+ return snapshot_ref
+
+
+@require_admin_context
+def snapshot_destroy(context, snapshot_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.Snapshot).\
+ filter_by(id=snapshot_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def snapshot_get(context, snapshot_id, session=None):
+ if not session:
+ session = get_session()
+ result = None
+
+ if is_admin_context(context):
+ result = session.query(models.Snapshot).\
+ filter_by(id=snapshot_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ elif is_user_context(context):
+ result = session.query(models.Snapshot).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=snapshot_id).\
+ filter_by(deleted=False).\
+ first()
+ if not result:
+ raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
+
+ return result
+
+
+@require_admin_context
+def snapshot_get_all(context):
+ session = get_session()
+ return session.query(models.Snapshot).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
+def snapshot_get_all_by_project(context, project_id):
+ authorize_project_context(context, project_id)
+
+ session = get_session()
+ return session.query(models.Snapshot).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_context
+def snapshot_update(context, snapshot_id, values):
+ session = get_session()
+ with session.begin():
+ snapshot_ref = snapshot_get(context, snapshot_id, session=session)
+ snapshot_ref.update(values)
+ snapshot_ref.save(session=session)
+
+
+###################
+
+
+@require_context
def security_group_get_all(context):
session = get_session()
return session.query(models.SecurityGroup).\
@@ -2550,6 +2632,17 @@ def instance_metadata_delete(context, instance_id, key):
@require_context
+def instance_metadata_delete_all(context, instance_id):
+ session = get_session()
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
def instance_metadata_get_item(context, instance_id, key):
session = get_session()
@@ -2568,6 +2661,9 @@ def instance_metadata_get_item(context, instance_id, key):
@require_context
def instance_metadata_update_or_create(context, instance_id, metadata):
session = get_session()
+
+ original_metadata = instance_metadata_get(context, instance_id)
+
meta_ref = None
for key, value in metadata.iteritems():
try:
@@ -2579,4 +2675,5 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
"instance_id": instance_id,
"deleted": 0})
meta_ref.save(session=session)
+
return metadata
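
For reference, the new snapshot helpers mirror the existing volume_* calls; a sketch of the lifecycle at the DB layer (the volume id and values are invented):

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()

    snap = db.snapshot_create(ctxt, {'volume_id': 1,
                                     'project_id': 'myproject',
                                     'status': 'creating',
                                     'volume_size': 10})
    db.snapshot_update(ctxt, snap.id, {'status': 'available'})
    db.snapshot_get(ctxt, snap.id)
    db.snapshot_destroy(ctxt, snap.id)   # soft delete: sets deleted/deleted_at
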
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
index a2d8192ca..5d0593f2e 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
@@ -160,7 +160,7 @@ def convert_backward(migrate_engine, old_quotas, new_quotas):
'project_id': quota.project_id,
'created_at': quota.created_at,
'updated_at': quota.updated_at,
- quota.resource: quota.hard_limit
+ quota.resource: quota.hard_limit,
}
else:
quotas[quota.project_id]['created_at'] = earliest(
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
index a169afb40..73c76f666 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py
@@ -14,23 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Column, Integer, MetaData, String, Table
-#from nova import log as logging
+from sqlalchemy import MetaData, Table
meta = MetaData()
-c_manageent = Column('server_manageent_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
-
-c_management = Column('server_management_url',
- String(length=255, convert_unicode=False,
- assert_unicode=None, unicode_error=None,
- _warn_on_bytestring=False),
- nullable=True)
-
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
@@ -40,11 +27,8 @@ def upgrade(migrate_engine):
tokens = Table('auth_tokens', meta, autoload=True,
autoload_with=migrate_engine)
- tokens.create_column(c_management)
- migrate_engine.execute(tokens.update()
- .values(server_management_url=tokens.c.server_manageent_url))
-
- tokens.c.server_manageent_url.drop()
+ c_manageent = tokens.c.server_manageent_url
+ c_manageent.alter(name='server_management_url')
def downgrade(migrate_engine):
@@ -53,8 +37,5 @@ def downgrade(migrate_engine):
tokens = Table('auth_tokens', meta, autoload=True,
autoload_with=migrate_engine)
- tokens.create_column(c_manageent)
- migrate_engine.execute(tokens.update()
- .values(server_manageent_url=tokens.c.server_management_url))
-
- tokens.c.server_management_url.drop()
+ c_management = tokens.c.server_management_url
+ c_management.alter(name='server_manageent_url')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py
new file mode 100644
index 000000000..f16d6db56
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/019_add_volume_snapshot_support.py
@@ -0,0 +1,70 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 MORITA Kazutaka.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Table, MetaData
+from sqlalchemy import Integer, DateTime, Boolean, String
+
+from nova import log as logging
+
+meta = MetaData()
+
+snapshots = Table('snapshots', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('volume_id', Integer(), nullable=False),
+ Column('user_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('status',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('progress',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('volume_size', Integer()),
+ Column('scheduled_at', DateTime(timezone=False)),
+ Column('display_name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('display_description',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ try:
+ snapshots.create()
+ except Exception:
+ logging.info(repr(snapshots))
+ logging.exception('Exception while creating table')
+ meta.drop_all(tables=[snapshots])
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ snapshots.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py
new file mode 100644
index 000000000..10bd9d5c9
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py
@@ -0,0 +1,47 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 MORITA Kazutaka.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Table, MetaData, Integer
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Column
+#
+
+snapshot_id = Column('snapshot_id', Integer())
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ volumes.create_column(snapshot_id)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 1215448f8..22a1a84e8 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -287,6 +287,8 @@ class Volume(BASE, NovaBase):
user_id = Column(String(255))
project_id = Column(String(255))
+ snapshot_id = Column(String(255))
+
host = Column(String(255)) # , ForeignKey('hosts.id'))
size = Column(Integer)
availability_zone = Column(String(255)) # TODO(vish): foreign key?
@@ -329,6 +331,31 @@ class Quota(BASE, NovaBase):
hard_limit = Column(Integer, nullable=True)
+class Snapshot(BASE, NovaBase):
+ """Represents a block storage device that can be attached to a vm."""
+ __tablename__ = 'snapshots'
+ id = Column(Integer, primary_key=True, autoincrement=True)
+
+ @property
+ def name(self):
+ return FLAGS.snapshot_name_template % self.id
+
+ @property
+ def volume_name(self):
+ return FLAGS.volume_name_template % self.volume_id
+
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+
+ volume_id = Column(Integer)
+ status = Column(String(255))
+ progress = Column(String(255))
+ volume_size = Column(Integer)
+
+ display_name = Column(String(255))
+ display_description = Column(String(255))
+
+
class ExportDevice(BASE, NovaBase):
"""Represates a shelf and blade that a volume can be exported on."""
__tablename__ = 'export_devices'
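
The name and volume_name properties simply apply the new *_name_template flags to the integer ids; with the defaults:

    print 'snapshot-%08x' % 3    # -> snapshot-00000003
    print 'volume-%08x' % 12     # -> volume-0000000c
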
diff --git a/nova/exception.py b/nova/exception.py
index 56c20d111..d3d58f3b2 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -271,6 +271,14 @@ class VolumeNotFoundForInstance(VolumeNotFound):
message = _("Volume not found for instance %(instance_id)s.")
+class SnapshotNotFound(NotFound):
+ message = _("Snapshot %(snapshot_id)s could not be found.")
+
+
+class VolumeIsBusy(Error):
+ message = _("deleting volume %(volume_name)s that has snapshot")
+
+
class ExportDeviceNotFoundForVolume(NotFound):
message = _("No export device found for volume %(volume_id)s.")
@@ -465,9 +473,8 @@ class ZoneNotFound(NotFound):
message = _("Zone %(zone_id)s could not be found.")
-class SchedulerHostFilterDriverNotFound(NotFound):
- message = _("Scheduler Host Filter Driver %(driver_name)s could"
- " not be found.")
+class SchedulerHostFilterNotFound(NotFound):
+ message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class InstanceMetadataNotFound(NotFound):
diff --git a/nova/flags.py b/nova/flags.py
index 9eaac5596..d5090edba 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False,
'should we use fake network devices and addresses')
DEFINE_string('rabbit_host', 'localhost', 'rabbit host')
DEFINE_integer('rabbit_port', 5672, 'rabbit port')
+DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL')
DEFINE_string('rabbit_userid', 'guest', 'rabbit userid')
DEFINE_string('rabbit_password', 'guest', 'rabbit password')
DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
diff --git a/nova/image/fake.py b/nova/image/fake.py
index b400b2adb..8e84c8597 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -52,11 +52,11 @@ class FakeImageService(service.BaseImageService):
self.create(None, image)
super(FakeImageService, self).__init__()
- def index(self, context):
+ def index(self, context, filters=None):
"""Returns list of images."""
return copy.deepcopy(self.images.values())
- def detail(self, context):
+ def detail(self, context, filters=None):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 193e37273..dec797619 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -58,23 +58,23 @@ class GlanceImageService(service.BaseImageService):
else:
self.client = client
- def index(self, context):
+ def index(self, context, filters=None):
"""Calls out to Glance for a list of images available."""
# NOTE(sirp): We need to use `get_images_detailed` and not
# `get_images` here because we need `is_public` and `properties`
# included so we can filter by user
filtered = []
- image_metas = self.client.get_images_detailed()
+ image_metas = self.client.get_images_detailed(filters=filters)
for image_meta in image_metas:
if self._is_image_available(context, image_meta):
meta_subset = utils.subset_dict(image_meta, ('id', 'name'))
filtered.append(meta_subset)
return filtered
- def detail(self, context):
+ def detail(self, context, filters=None):
"""Calls out to Glance for a list of detailed image information."""
filtered = []
- image_metas = self.client.get_images_detailed()
+ image_metas = self.client.get_images_detailed(filters=filters)
for image_meta in image_metas:
if self._is_image_available(context, image_meta):
base_image_meta = self._translate_to_base(image_meta)
diff --git a/nova/image/local.py b/nova/image/local.py
index 918180bae..677d5302b 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -63,7 +63,7 @@ class LocalImageService(service.BaseImageService):
images.append(unhexed_image_id)
return images
- def index(self, context):
+ def index(self, context, *args, **kwargs):
filtered = []
image_metas = self.detail(context)
for image_meta in image_metas:
@@ -71,7 +71,7 @@ class LocalImageService(service.BaseImageService):
filtered.append(meta)
return filtered
- def detail(self, context):
+ def detail(self, context, *args, **kwargs):
images = []
for image_id in self._ids():
try:
diff --git a/nova/image/s3.py b/nova/image/s3.py
index c38c58d95..ec8852f09 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -31,12 +31,14 @@ import eventlet
from nova import crypto
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.image import service
from nova.api.ec2 import ec2utils
+LOG = logging.getLogger("nova.image.s3")
FLAGS = flags.FLAGS
flags.DEFINE_string('image_decryption_dir', '/tmp',
'parent dir for tempdir used for image decryption')
@@ -161,43 +163,83 @@ class S3ImageService(service.BaseImageService):
def delayed_create():
"""This handles the fetching and decrypting of the part files."""
- parts = []
- for fn_element in manifest.find('image').getiterator('filename'):
- part = self._download_file(bucket, fn_element.text, image_path)
- parts.append(part)
-
- # NOTE(vish): this may be suboptimal, should we use cat?
- encrypted_filename = os.path.join(image_path, 'image.encrypted')
- with open(encrypted_filename, 'w') as combined:
- for filename in parts:
- with open(filename) as part:
- shutil.copyfileobj(part, combined)
-
- metadata['properties']['image_state'] = 'decrypting'
+ metadata['properties']['image_state'] = 'downloading'
self.service.update(context, image_id, metadata)
- hex_key = manifest.find('image/ec2_encrypted_key').text
- encrypted_key = binascii.a2b_hex(hex_key)
- hex_iv = manifest.find('image/ec2_encrypted_iv').text
- encrypted_iv = binascii.a2b_hex(hex_iv)
+ try:
+ parts = []
+ elements = manifest.find('image').getiterator('filename')
+ for fn_element in elements:
+ part = self._download_file(bucket,
+ fn_element.text,
+ image_path)
+ parts.append(part)
+
+ # NOTE(vish): this may be suboptimal, should we use cat?
+ enc_filename = os.path.join(image_path, 'image.encrypted')
+ with open(enc_filename, 'w') as combined:
+ for filename in parts:
+ with open(filename) as part:
+ shutil.copyfileobj(part, combined)
+
+ except Exception:
+ LOG.error(_("Failed to download %(image_location)s "
+ "to %(image_path)s"), locals())
+ metadata['properties']['image_state'] = 'failed_download'
+ self.service.update(context, image_id, metadata)
+ raise
- # FIXME(vish): grab key from common service so this can run on
- # any host.
- cloud_pk = crypto.key_path(context.project_id)
+ metadata['properties']['image_state'] = 'decrypting'
+ self.service.update(context, image_id, metadata)
- decrypted_filename = os.path.join(image_path, 'image.tar.gz')
- self._decrypt_image(encrypted_filename, encrypted_key,
- encrypted_iv, cloud_pk, decrypted_filename)
+ try:
+ hex_key = manifest.find('image/ec2_encrypted_key').text
+ encrypted_key = binascii.a2b_hex(hex_key)
+ hex_iv = manifest.find('image/ec2_encrypted_iv').text
+ encrypted_iv = binascii.a2b_hex(hex_iv)
+
+ # FIXME(vish): grab key from common service so this can run on
+ # any host.
+ cloud_pk = crypto.key_path(context.project_id)
+
+ dec_filename = os.path.join(image_path, 'image.tar.gz')
+ self._decrypt_image(enc_filename, encrypted_key,
+ encrypted_iv, cloud_pk,
+ dec_filename)
+ except Exception:
+ LOG.error(_("Failed to decrypt %(image_location)s "
+ "to %(image_path)s"), locals())
+ metadata['properties']['image_state'] = 'failed_decrypt'
+ self.service.update(context, image_id, metadata)
+ raise
metadata['properties']['image_state'] = 'untarring'
self.service.update(context, image_id, metadata)
- unz_filename = self._untarzip_image(image_path, decrypted_filename)
+ try:
+ unz_filename = self._untarzip_image(image_path, dec_filename)
+ except Exception:
+ LOG.error(_("Failed to untar %(image_location)s "
+ "to %(image_path)s"), locals())
+ metadata['properties']['image_state'] = 'failed_untar'
+ self.service.update(context, image_id, metadata)
+ raise
metadata['properties']['image_state'] = 'uploading'
- with open(unz_filename) as image_file:
- self.service.update(context, image_id, metadata, image_file)
+ self.service.update(context, image_id, metadata)
+ try:
+ with open(unz_filename) as image_file:
+ self.service.update(context, image_id,
+ metadata, image_file)
+ except Exception:
+ LOG.error(_("Failed to upload %(image_location)s "
+ "to %(image_path)s"), locals())
+ metadata['properties']['image_state'] = 'failed_upload'
+ self.service.update(context, image_id, metadata)
+ raise
+
metadata['properties']['image_state'] = 'available'
+ metadata['status'] = 'active'
self.service.update(context, image_id, metadata)
shutil.rmtree(image_path)
diff --git a/nova/image/service.py b/nova/image/service.py
index ab6749049..5361cfc89 100644
--- a/nova/image/service.py
+++ b/nova/image/service.py
@@ -46,7 +46,7 @@ class BaseImageService(object):
# the ImageService subclass
SERVICE_IMAGE_ATTRS = []
- def index(self, context):
+ def index(self, context, *args, **kwargs):
"""List images.
:returns: a sequence of mappings with the following signature
@@ -55,7 +55,7 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def detail(self, context):
+ def detail(self, context, *args, **kwargs):
"""Detailed information about an images.
:returns: a sequence of mappings with the following signature
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
index dd6327c8f..76025a1e3 100644
--- a/nova/objectstore/s3server.py
+++ b/nova/objectstore/s3server.py
@@ -81,7 +81,7 @@ class S3Application(wsgi.Router):
super(S3Application, self).__init__(mapper)
-class BaseRequestHandler(wsgi.Controller):
+class BaseRequestHandler(object):
"""Base class emulating Tornado's web framework pattern in WSGI.
This is a direct port of Tornado's implementation, so some key decisions
diff --git a/nova/rpc.py b/nova/rpc.py
index c5277c6a9..2e78a31e7 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection):
if new or not hasattr(cls, '_instance'):
params = dict(hostname=FLAGS.rabbit_host,
port=FLAGS.rabbit_port,
+ ssl=FLAGS.rabbit_use_ssl,
userid=FLAGS.rabbit_userid,
password=FLAGS.rabbit_password,
virtual_host=FLAGS.rabbit_virtual_host)
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index 483f3225c..7d6ee0ee3 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -14,8 +14,8 @@
# under the License.
"""
-Host Filter is a driver mechanism for requesting instance resources.
-Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
+Host Filter is a mechanism for requesting instance resources.
+Three filters are included: AllHosts, Flavor & JSON. AllHosts just
returns the full, unfiltered list of hosts. Flavor is a hard coded
matching mechanism based on flavor criteria and JSON is an ad-hoc
filter grammar.
@@ -42,17 +42,18 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
+from nova.scheduler import zone_aware_scheduler
LOG = logging.getLogger('nova.scheduler.host_filter')
FLAGS = flags.FLAGS
-flags.DEFINE_string('default_host_filter_driver',
+flags.DEFINE_string('default_host_filter',
'nova.scheduler.host_filter.AllHostsFilter',
- 'Which driver to use for filtering hosts.')
+ 'Which filter to use for filtering hosts.')
class HostFilter(object):
- """Base class for host filter drivers."""
+ """Base class for host filters."""
def instance_type_to_filter(self, instance_type):
"""Convert instance_type into a filter for most common use-case."""
@@ -63,14 +64,15 @@ class HostFilter(object):
raise NotImplementedError()
def _full_name(self):
- """module.classname of the filter driver"""
+ """module.classname of the filter."""
return "%s.%s" % (self.__module__, self.__class__.__name__)
class AllHostsFilter(HostFilter):
- """NOP host filter driver. Returns all hosts in ZoneManager.
+ """ NOP host filter. Returns all hosts in ZoneManager.
This essentially does what the old Scheduler+Chance used
- to give us."""
+ to give us.
+ """
def instance_type_to_filter(self, instance_type):
"""Return anything to prevent base-class from raising
@@ -83,8 +85,8 @@ class AllHostsFilter(HostFilter):
for host, services in zone_manager.service_states.iteritems()]
-class FlavorFilter(HostFilter):
- """HostFilter driver hard-coded to work with flavors."""
+class InstanceTypeFilter(HostFilter):
+ """HostFilter hard-coded to work with InstanceType records."""
def instance_type_to_filter(self, instance_type):
"""Use instance_type to filter hosts."""
@@ -98,9 +100,10 @@ class FlavorFilter(HostFilter):
capabilities = services.get('compute', {})
host_ram_mb = capabilities['host_memory_free']
disk_bytes = capabilities['disk_available']
- if host_ram_mb >= instance_type['memory_mb'] and \
- disk_bytes >= instance_type['local_gb']:
- selected_hosts.append((host, capabilities))
+ spec_ram = instance_type['memory_mb']
+ spec_disk = instance_type['local_gb']
+ if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
+ selected_hosts.append((host, capabilities))
return selected_hosts
#host entries (currently) are like:
@@ -109,15 +112,15 @@ class FlavorFilter(HostFilter):
# 'host_memory_total': 8244539392,
# 'host_memory_overhead': 184225792,
# 'host_memory_free': 3868327936,
-# 'host_memory_free_computed': 3840843776},
-# 'host_other-config': {},
+# 'host_memory_free_computed': 3840843776,
+# 'host_other_config': {},
# 'host_ip_address': '192.168.1.109',
# 'host_cpu_info': {},
# 'disk_available': 32954957824,
# 'disk_total': 50394562560,
-# 'disk_used': 17439604736},
+# 'disk_used': 17439604736,
# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
-# 'host_name-label': 'xs-mini'}
+# 'host_name_label': 'xs-mini'}
# instance_type table has:
#name = Column(String(255), unique=True)
@@ -131,8 +134,9 @@ class FlavorFilter(HostFilter):
class JsonFilter(HostFilter):
- """Host Filter driver to allow simple JSON-based grammar for
- selecting hosts."""
+ """Host Filter to allow simple JSON-based grammar for
+ selecting hosts.
+ """
def _equals(self, args):
"""First term is == all the other terms."""
@@ -222,13 +226,14 @@ class JsonFilter(HostFilter):
required_disk = instance_type['local_gb']
query = ['and',
['>=', '$compute.host_memory_free', required_ram],
- ['>=', '$compute.disk_available', required_disk]
+ ['>=', '$compute.disk_available', required_disk],
]
return (self._full_name(), json.dumps(query))
def _parse_string(self, string, host, services):
"""Strings prefixed with $ are capability lookups in the
- form '$service.capability[.subcap*]'"""
+ form '$service.capability[.subcap*]'
+ """
if not string:
return None
if string[0] != '$':
@@ -271,18 +276,48 @@ class JsonFilter(HostFilter):
return hosts
-DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
+FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter]
-def choose_driver(driver_name=None):
- """Since the caller may specify which driver to use we need
- to have an authoritative list of what is permissible. This
- function checks the driver name against a predefined set
- of acceptable drivers."""
+def choose_host_filter(filter_name=None):
+ """Since the caller may specify which filter to use we need
+ to have an authoritative list of what is permissible. This
+ function checks the filter name against a predefined set
+ of acceptable filters.
+ """
- if not driver_name:
- driver_name = FLAGS.default_host_filter_driver
- for driver in DRIVERS:
- if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
- return driver()
- raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
+ if not filter_name:
+ filter_name = FLAGS.default_host_filter
+ for filter_class in FILTERS:
+ host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__)
+ if host_match == filter_name:
+ return filter_class()
+ raise exception.SchedulerHostFilterNotFound(filter_name=filter_name)
+
+
+class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
+ """The HostFilterScheduler uses the HostFilter to filter
+ hosts for weighing. The particular filter used may be passed in
+ as an argument or the default will be used.
+
+ request_spec = {'filter': <Filter name>,
+ 'instance_type': <InstanceType dict>}
+ """
+
+ def filter_hosts(self, num, request_spec):
+ """Filter the full host list (from the ZoneManager)"""
+ filter_name = request_spec.get('filter', None)
+ host_filter = choose_host_filter(filter_name)
+
+ # TODO(sandy): We're only using InstanceType-based specs
+ # currently. Later we'll need to snoop for more detailed
+ # host filter requests.
+ instance_type = request_spec['instance_type']
+ name, query = host_filter.instance_type_to_filter(instance_type)
+ return host_filter.filter_hosts(self.zone_manager, query)
+
+ def weigh_hosts(self, num, request_spec, hosts):
+ """Derived classes must override this method and return
+ a list of hosts in [{weight, hostname}] format.
+ """
+ return [dict(weight=1, hostname=host) for host, caps in hosts]
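
To see the renamed pieces working together, a sketch of what HostFilterScheduler does with a request_spec (the instance_type values are invented and the ZoneManager call is left commented out):

    from nova.scheduler import host_filter

    instance_type = {'memory_mb': 2048, 'local_gb': 20}
    request_spec = {
        'filter': 'nova.scheduler.host_filter.InstanceTypeFilter',
        'instance_type': instance_type,
    }

    # choose_host_filter() checks the name against FILTERS and instantiates
    # it; with no name it falls back to FLAGS.default_host_filter.
    f = host_filter.choose_host_filter(request_spec['filter'])

    # 'name' is the filter's module.classname; 'query' is whatever that
    # filter needs (a JSON expression in the JsonFilter case).
    name, query = f.instance_type_to_filter(instance_type)

    # filter_hosts() then walks zone_manager.service_states and keeps the
    # (host, capabilities) pairs with enough free RAM and disk:
    # hosts = f.filter_hosts(zone_manager, query)
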
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 55cd7208b..bd40e73c0 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -83,11 +83,16 @@ class SchedulerManager(manager.Manager):
except AttributeError:
host = self.driver.schedule(elevated, topic, *args, **kwargs)
+ if not host:
+ LOG.debug(_("%(topic)s %(method)s handled in Scheduler")
+ % locals())
+ return
+
rpc.cast(context,
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
+ LOG.debug(_("Casted to %(topic)s %(host)s for %(method)s") % locals())
# NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
# Based on bexar design summit discussion,
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index b3d230bd2..bc67c7794 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -22,7 +22,9 @@ across zones. There are two expansion points to this class for:
import operator
+from nova import db
from nova import log as logging
+from nova import rpc
from nova.scheduler import api
from nova.scheduler import driver
@@ -36,7 +38,7 @@ class ZoneAwareScheduler(driver.Scheduler):
"""Call novaclient zone method. Broken out for testing."""
return api.call_zone_method(context, method, specs=specs)
- def schedule_run_instance(self, context, topic='compute', specs={},
+ def schedule_run_instance(self, context, instance_id, request_spec,
*args, **kwargs):
"""This method is called from nova.compute.api to provision
an instance. However we need to look at the parameters being
@@ -44,56 +46,83 @@ class ZoneAwareScheduler(driver.Scheduler):
1. Create a Build Plan and then provision, or
2. Use the Build Plan information in the request parameters
to simply create the instance (either in this zone or
- a child zone)."""
+ a child zone).
+ """
- if 'blob' in specs:
- return self.provision_instance(context, topic, specs)
+ # TODO(sandy): We'll have to look for richer specs at some point.
- # Create build plan and provision ...
- build_plan = self.select(context, specs)
- for item in build_plan:
- self.provision_instance(context, topic, item)
+ if 'blob' in request_spec:
+ self.provision_resource(context, request_spec, instance_id, kwargs)
+ return None
- def provision_instance(context, topic, item):
- """Create the requested instance in this Zone or a child zone."""
- pass
+ # Create build plan and provision ...
+ build_plan = self.select(context, request_spec)
+ if not build_plan:
+ raise driver.NoValidHost(_('No hosts were available'))
- def select(self, context, *args, **kwargs):
+ for item in build_plan:
+ self.provision_resource(context, item, instance_id, kwargs)
+
+ # Returning None short-circuits the routing to Compute (since
+ # we've already done it here)
+ return None
+
+ def provision_resource(self, context, item, instance_id, kwargs):
+ """Create the requested resource in this Zone or a child zone."""
+ if "hostname" in item:
+ host = item['hostname']
+ kwargs['instance_id'] = instance_id
+ rpc.cast(context,
+ db.queue_get_for(context, "compute", host),
+ {"method": "run_instance",
+ "args": kwargs})
+ LOG.debug(_("Casted to compute %(host)s for run_instance")
+ % locals())
+ else:
+ # TODO(sandy) Provision in child zone ...
+ LOG.warning(_("Provision to Child Zone not supported (yet)"))
+ pass
+
+ def select(self, context, request_spec, *args, **kwargs):
"""Select returns a list of weights and zone/host information
corresponding to the best hosts to service the request. Any
child zone information has been encrypted so as not to reveal
- anything about the children."""
- return self._schedule(context, "compute", *args, **kwargs)
+ anything about the children.
+ """
+ return self._schedule(context, "compute", request_spec,
+ *args, **kwargs)
- def schedule(self, context, topic, *args, **kwargs):
+ # TODO(sandy): We're only focused on compute instances right now,
+ # so we don't implement the default "schedule()" method required
+ # of Schedulers.
+ def schedule(self, context, topic, request_spec, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
"""
- res = self._schedule(context, topic, *args, **kwargs)
- # TODO(sirp): should this be a host object rather than a weight-dict?
- if not res:
- raise driver.NoValidHost(_('No hosts were available'))
- return res[0]
+ raise driver.NoValidHost(_('No hosts were available'))
- def _schedule(self, context, topic, *args, **kwargs):
+ def _schedule(self, context, topic, request_spec, *args, **kwargs):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
- #TODO(sandy): extract these from args.
+ if topic != "compute":
+ raise NotImplementedError(_("Zone Aware Scheduler only understands "
+ "Compute nodes (for now)"))
+
+ #TODO(sandy): how to infer this from OS API params?
num_instances = 1
- specs = {}
# Filter local hosts based on requirements ...
- host_list = self.filter_hosts(num_instances, specs)
+ host_list = self.filter_hosts(num_instances, request_spec)
# then weigh the selected hosts.
# weighted = [{weight=weight, name=hostname}, ...]
- weighted = self.weigh_hosts(num_instances, specs, host_list)
+ weighted = self.weigh_hosts(num_instances, request_spec, host_list)
# Next, tack on the best weights from the child zones ...
child_results = self._call_zone_method(context, "select",
- specs=specs)
+ specs=request_spec)
for child_zone, result in child_results:
for weighting in result:
# Remember the child_zone so we can get back to
@@ -108,12 +137,14 @@ class ZoneAwareScheduler(driver.Scheduler):
weighted.sort(key=operator.itemgetter('weight'))
return weighted
- def filter_hosts(self, num, specs):
+ def filter_hosts(self, num, request_spec):
"""Derived classes must override this method and return
- a list of hosts in [(hostname, capability_dict)] format."""
+ a list of hosts in [(hostname, capability_dict)] format.
+ """
raise NotImplemented()
- def weigh_hosts(self, num, specs, hosts):
+ def weigh_hosts(self, num, request_spec, hosts):
"""Derived classes must override this method and return
- a lists of hosts in [{weight, hostname}] format."""
+ a list of hosts in [{weight, hostname}] format.
+ """
raise NotImplemented()
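
For orientation, a toy subclass showing the two hooks a concrete zone-aware scheduler must supply (HostFilterScheduler earlier in this diff is the real one); the class and its weighting policy are hypothetical:

    from nova.scheduler import zone_aware_scheduler

    class EverythingGoesScheduler(zone_aware_scheduler.ZoneAwareScheduler):
        """Toy scheduler: accepts every known host and weighs them equally."""

        def filter_hosts(self, num, request_spec):
            # (hostname, capability_dict) tuples, per the contract above.
            return list(self.zone_manager.service_states.items())

        def weigh_hosts(self, num, request_spec, hosts):
            # [{weight, hostname}] dicts; lower weights sort first in _schedule().
            return [dict(weight=1, hostname=host) for host, caps in hosts]
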
diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py
index dbdd0928a..03aad007a 100644
--- a/nova/tests/api/openstack/extensions/foxinsocks.py
+++ b/nova/tests/api/openstack/extensions/foxinsocks.py
@@ -17,12 +17,10 @@
import json
-from nova import wsgi
-
from nova.api.openstack import extensions
-class FoxInSocksController(wsgi.Controller):
+class FoxInSocksController(object):
def index(self, req):
return "Try to say this Mr. Knox, sir..."
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index bf51239e6..8e0156afa 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -166,11 +166,11 @@ def stub_out_glance(stubs, initial_fixtures=None):
def __init__(self, initial_fixtures):
self.fixtures = initial_fixtures or []
- def fake_get_images(self):
+ def fake_get_images(self, filters=None):
return [dict(id=f['id'], name=f['name'])
for f in self.fixtures]
- def fake_get_images_detailed(self):
+ def fake_get_images_detailed(self, filters=None):
return copy.deepcopy(self.fixtures)
def fake_get_image_meta(self, image_id):
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 544298602..60914c0a3 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -26,15 +26,15 @@ from nova import flags
from nova.api import openstack
from nova.api.openstack import extensions
from nova.api.openstack import flavors
+from nova.api.openstack import wsgi
from nova.tests.api.openstack import fakes
-import nova.wsgi
FLAGS = flags.FLAGS
response_body = "Try to say this Mr. Knox, sir..."
-class StubController(nova.wsgi.Controller):
+class StubController(object):
def __init__(self, body):
self.body = body
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 2c329f920..9f1f28611 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -28,6 +28,7 @@ import shutil
import tempfile
import xml.dom.minidom as minidom
+import mox
import stubout
import webob
@@ -708,6 +709,146 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertDictListMatch(expected, response_list)
+ def test_image_filter_with_name(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'name': 'testname'}
+ image_service.index(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images?name=testname')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.index(request)
+ mocker.VerifyAll()
+
+ def test_image_filter_with_status(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'status': 'ACTIVE'}
+ image_service.index(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images?status=ACTIVE')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.index(request)
+ mocker.VerifyAll()
+
+ def test_image_filter_with_property(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'property-test': '3'}
+ image_service.index(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images?property-test=3')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.index(request)
+ mocker.VerifyAll()
+
+ def test_image_filter_not_supported(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'status': 'ACTIVE'}
+ image_service.index(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.index(request)
+ mocker.VerifyAll()
+
+ def test_image_no_filters(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {}
+ image_service.index(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.index(request)
+ mocker.VerifyAll()
+
+ def test_image_detail_filter_with_name(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'name': 'testname'}
+ image_service.detail(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images/detail?name=testname')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.detail(request)
+ mocker.VerifyAll()
+
+ def test_image_detail_filter_with_status(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'status': 'ACTIVE'}
+ image_service.detail(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images/detail?status=ACTIVE')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.detail(request)
+ mocker.VerifyAll()
+
+ def test_image_detail_filter_with_property(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'property-test': '3'}
+ image_service.detail(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images/detail?property-test=3')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.detail(request)
+ mocker.VerifyAll()
+
+ def test_image_detail_filter_not_supported(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {'status': 'ACTIVE'}
+ image_service.detail(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.detail(request)
+ mocker.VerifyAll()
+
+ def test_image_detail_no_filters(self):
+ mocker = mox.Mox()
+ image_service = mocker.CreateMockAnything()
+ context = object()
+ filters = {}
+ image_service.detail(context, filters).AndReturn([])
+ mocker.ReplayAll()
+ request = webob.Request.blank(
+ '/v1.1/images/detail')
+ request.environ['nova.context'] = context
+ controller = images.ControllerV11(image_service=image_service)
+ controller.detail(request)
+ mocker.VerifyAll()
+
def test_get_image_found(self):
req = webob.Request.blank('/v1.0/images/123')
res = req.get_response(fakes.wsgi_app())
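
The v1.1 controller change these tests exercise is not part of this excerpt; what they pin down is that supported query parameters (plus free-form property-* keys) are forwarded to the image service as a filters dict and anything else is dropped. A hedged sketch of that behaviour (SUPPORTED_FILTERS and the function name are assumptions):

    import webob

    SUPPORTED_FILTERS = ('name', 'status')

    def image_filters_from_request(req):
        """Keep only supported query params, plus free-form property-* keys."""
        filters = {}
        for param, value in req.GET.items():
            if param in SUPPORTED_FILTERS or param.startswith('property-'):
                filters[param] = value
        return filters

    req = webob.Request.blank('/v1.1/images?name=testname&UNSUPPORTEDFILTER=x')
    assert image_filters_from_request(req) == {'name': 'testname'}
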
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 70f59eda6..01613d1d8 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -73,7 +73,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite):
def setUp(self):
"""Run before each test."""
BaseLimitTestSuite.setUp(self)
- self.controller = limits.LimitsControllerV10()
+ self.controller = limits.create_resource('1.0')
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
@@ -209,7 +209,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite):
def setUp(self):
"""Run before each test."""
BaseLimitTestSuite.setUp(self)
- self.controller = limits.LimitsControllerV11()
+ self.controller = limits.create_resource('1.1')
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index e0910fed6..11dcaaade 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -217,7 +217,6 @@ class ServersTest(test.TestCase):
},
]
- print res_dict['server']
self.assertEqual(res_dict['server']['links'], expected_links)
def test_get_server_by_id_with_addresses_xml(self):
@@ -773,9 +772,7 @@ class ServersTest(test.TestCase):
self.body = json.dumps(dict(server=inst_dict))
def server_update(context, id, params):
- filtered_dict = dict(
- display_name='server_test'
- )
+ filtered_dict = dict(display_name='server_test')
self.assertEqual(params, filtered_dict)
return filtered_dict
@@ -844,7 +841,6 @@ class ServersTest(test.TestCase):
req = webob.Request.blank('/v1.0/servers/detail')
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
- print res.body
dom = minidom.parseString(res.body)
for i, server in enumerate(dom.getElementsByTagName('server')):
self.assertEqual(server.getAttribute('id'), str(i))
@@ -1008,6 +1004,14 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 501)
+ def test_server_change_password_xml(self):
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/xml'
+ req.body = '<changePassword adminPass="1234pass">'
+# res = req.get_response(fakes.wsgi_app())
+# self.assertEqual(res.status_int, 501)
+
def test_server_change_password_v1_1(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
@@ -1399,13 +1403,13 @@ class ServersTest(test.TestCase):
class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
def setUp(self):
- self.deserializer = servers.ServerCreateRequestXMLDeserializer()
+ self.deserializer = servers.ServerXMLDeserializer()
def test_minimal_request(self):
serial_request = """
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1"/>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1419,7 +1423,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
name="new-server-test" imageId="1" flavorId="1">
<metadata/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1434,7 +1438,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
name="new-server-test" imageId="1" flavorId="1">
<personality/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1450,7 +1454,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<metadata/>
<personality/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1467,7 +1471,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<personality/>
<metadata/>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"server": {
"name": "new-server-test",
"imageId": "1",
@@ -1485,7 +1489,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<file path="/etc/conf">aabbccdd</file>
</personality>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1495,7 +1499,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
name="new-server-test" imageId="1" flavorId="1">
<personality><file path="/etc/conf">aabbccdd</file>
<file path="/etc/sudoers">abcd</file></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"},
{"path": "/etc/sudoers", "contents": "abcd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1511,7 +1515,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<file path="/etc/ignoreme">anything</file>
</personality>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1520,7 +1524,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1">
<personality><file>aabbccdd</file></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"contents": "aabbccdd"}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1529,7 +1533,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1">
<personality><file path="/etc/conf"></file></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": ""}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1538,7 +1542,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
name="new-server-test" imageId="1" flavorId="1">
<personality><file path="/etc/conf"/></personality></server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = [{"path": "/etc/conf", "contents": ""}]
self.assertEquals(request["server"]["personality"], expected)
@@ -1550,7 +1554,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="alpha">beta</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "beta"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1563,7 +1567,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="foo">bar</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "beta", "foo": "bar"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1575,7 +1579,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="alpha"></meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": ""}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1588,7 +1592,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="delta"/>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"alpha": "", "delta": ""}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1600,7 +1604,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta>beta</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"": "beta"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1613,7 +1617,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta>gamma</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"": "gamma"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1626,7 +1630,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
<meta key="foo">baz</meta>
</metadata>
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
expected = {"foo": "baz"}
self.assertEquals(request["server"]["metadata"], expected)
@@ -1673,7 +1677,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
},
],
}}
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
self.assertEqual(request, expected)
def test_request_xmlser_with_flavor_image_ref(self):
@@ -1683,7 +1687,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
imageRef="http://localhost:8774/v1.1/images/1"
flavorRef="http://localhost:8774/v1.1/flavors/1">
</server>"""
- request = self.deserializer.deserialize(serial_request)
+ request = self.deserializer.deserialize(serial_request, 'create')
self.assertEquals(request["server"]["flavorRef"],
"http://localhost:8774/v1.1/flavors/1")
self.assertEquals(request["server"]["imageRef"],
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
new file mode 100644
index 000000000..ebbdc9409
--- /dev/null
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -0,0 +1,293 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+import json
+import webob
+
+from nova import exception
+from nova import test
+from nova.api.openstack import wsgi
+
+
+class RequestTest(test.TestCase):
+ def test_content_type_missing(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.body = "<body />"
+ self.assertRaises(exception.InvalidContentType,
+ request.get_content_type)
+
+ def test_content_type_unsupported(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "text/html"
+ request.body = "asdf<br />"
+ self.assertRaises(exception.InvalidContentType,
+ request.get_content_type)
+
+ def test_content_type_with_charset(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "application/json; charset=UTF-8"
+ result = request.get_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_from_accept_xml(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml, application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = \
+ "application/json; q=0.3, application/xml; q=0.9"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_from_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123.invalid')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_accept_and_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_accept_default(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+
+class DictSerializerTest(test.TestCase):
+ def test_dispatch(self):
+ serializer = wsgi.DictSerializer()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.serialize({}, 'create'), 'pants')
+
+ def test_dispatch_default(self):
+ serializer = wsgi.DictSerializer()
+ serializer.create = lambda x: 'pants'
+ serializer.default = lambda x: 'trousers'
+ self.assertEqual(serializer.serialize({}, 'update'), 'trousers')
+
+
+class XMLDictSerializerTest(test.TestCase):
+ def test_xml(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
+ expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
+ serializer = wsgi.XMLDictSerializer(xmlns="asdf")
+ result = serializer.serialize(input_dict)
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_xml)
+
+
+class JSONDictSerializerTest(test.TestCase):
+ def test_json(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
+ expected_json = '{"servers":{"a":[2,3]}}'
+ serializer = wsgi.JSONDictSerializer()
+ result = serializer.serialize(input_dict)
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_json)
+
+
+class TextDeserializerTest(test.TestCase):
+ def test_dispatch(self):
+ deserializer = wsgi.TextDeserializer()
+ deserializer.create = lambda x: 'pants'
+ deserializer.default = lambda x: 'trousers'
+ self.assertEqual(deserializer.deserialize({}, 'create'), 'pants')
+
+ def test_dispatch_default(self):
+ deserializer = wsgi.TextDeserializer()
+ deserializer.create = lambda x: 'pants'
+ deserializer.default = lambda x: 'trousers'
+ self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers')
+
+
+class JSONDeserializerTest(test.TestCase):
+ def test_json(self):
+ data = """{"a": {
+ "a1": "1",
+ "a2": "2",
+ "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+ "d": {"e": "1"},
+ "f": "1"}}"""
+ as_dict = dict(a={
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+ 'd': {'e': '1'},
+ 'f': '1'})
+ deserializer = wsgi.JSONDeserializer()
+ self.assertEqual(deserializer.deserialize(data), as_dict)
+
+
+class XMLDeserializerTest(test.TestCase):
+ def test_xml(self):
+ xml = """
+ <a a1="1" a2="2">
+ <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
+ <d><e>1</e></d>
+ <f>1</f>
+ </a>
+ """.strip()
+ as_dict = dict(a={
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+ 'd': {'e': '1'},
+ 'f': '1'})
+ metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
+ deserializer = wsgi.XMLDeserializer(metadata=metadata)
+ self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+ def test_xml_empty(self):
+ xml = """<a></a>"""
+ as_dict = {"a": {}}
+ deserializer = wsgi.XMLDeserializer()
+ self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+
+class ResponseSerializerTest(test.TestCase):
+ def setUp(self):
+ class JSONSerializer(object):
+ def serialize(self, data):
+ return 'pew_json'
+
+ class XMLSerializer(object):
+ def serialize(self, data):
+ return 'pew_xml'
+
+ self.serializers = {
+ 'application/json': JSONSerializer(),
+ 'application/XML': XMLSerializer(),
+ }
+
+ self.serializer = wsgi.ResponseSerializer(serializers=self.serializers)
+
+ def tearDown(self):
+ pass
+
+ def test_get_serializer(self):
+ self.assertEqual(self.serializer.get_serializer('application/json'),
+ self.serializers['application/json'])
+
+ def test_get_serializer_unknown_content_type(self):
+ self.assertRaises(exception.InvalidContentType,
+ self.serializer.get_serializer,
+ 'application/unknown')
+
+ def test_serialize_response(self):
+ response = self.serializer.serialize({}, 'application/json')
+ self.assertEqual(response.headers['Content-Type'], 'application/json')
+ self.assertEqual(response.body, 'pew_json')
+
+ def test_serialize_response_dict_to_unknown_content_type(self):
+ self.assertRaises(exception.InvalidContentType,
+ self.serializer.serialize,
+ {}, 'application/unknown')
+
+
+class RequestDeserializerTest(test.TestCase):
+ def setUp(self):
+ class JSONDeserializer(object):
+ def deserialize(self, data):
+ return 'pew_json'
+
+ class XMLDeserializer(object):
+ def deserialize(self, data):
+ return 'pew_xml'
+
+ self.deserializers = {
+ 'application/json': JSONDeserializer(),
+ 'application/XML': XMLDeserializer(),
+ }
+
+ self.deserializer = wsgi.RequestDeserializer(
+ deserializers=self.deserializers)
+
+ def tearDown(self):
+ pass
+
+ def test_get_deserializer(self):
+ expected = self.deserializer.get_deserializer('application/json')
+ self.assertEqual(expected, self.deserializers['application/json'])
+
+ def test_get_deserializer_unknown_content_type(self):
+ self.assertRaises(exception.InvalidContentType,
+ self.deserializer.get_deserializer,
+ 'application/unknown')
+
+ def test_get_expected_content_type(self):
+ request = wsgi.Request.blank('/')
+ request.headers['Accept'] = 'application/json'
+ self.assertEqual(self.deserializer.get_expected_content_type(request),
+ 'application/json')
+
+ def test_get_action_args(self):
+ env = {
+ 'wsgiorg.routing_args': [None, {
+ 'controller': None,
+ 'format': None,
+ 'action': 'update',
+ 'id': 12,
+ }],
+ }
+
+ expected = {'action': 'update', 'id': 12}
+
+ self.assertEqual(self.deserializer.get_action_args(env), expected)
+
+ def test_deserialize(self):
+ def fake_get_routing_args(request):
+ return {'action': 'create'}
+ self.deserializer.get_action_args = fake_get_routing_args
+
+ request = wsgi.Request.blank('/')
+ request.headers['Accept'] = 'application/xml'
+
+ deserialized = self.deserializer.deserialize(request)
+ expected = ('create', {}, 'application/xml')
+
+ self.assertEqual(expected, deserialized)
+
+
+class ResourceTest(test.TestCase):
+ def test_dispatch(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ resource = wsgi.Resource(Controller())
+ actual = resource.dispatch(None, 'index', {'pants': 'off'})
+ expected = 'off'
+ self.assertEqual(actual, expected)
+
+ def test_dispatch_unknown_controller_action(self):
+ class Controller(object):
+ def index(self, req, pants=None):
+ return pants
+
+ resource = wsgi.Resource(Controller())
+ self.assertRaises(AttributeError, resource.dispatch,
+ None, 'create', {})
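
The serializer/deserializer tests above all rely on the same dispatch-by-action idiom: call the method named after the action if it exists, otherwise fall back to default(). Restated as a self-contained sketch (ActionDispatcher and PantsSerializer are illustrative names, not Nova classes):

    class ActionDispatcher(object):
        """Dispatch to the method named after the action, or default()."""

        def dispatch(self, data, action='default'):
            return getattr(self, action, self.default)(data)

        def default(self, data):
            raise NotImplementedError()


    class PantsSerializer(ActionDispatcher):
        def create(self, data):
            return 'pants'

        def default(self, data):
            return 'trousers'


    assert PantsSerializer().dispatch({}, 'create') == 'pants'
    assert PantsSerializer().dispatch({}, 'update') == 'trousers'
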
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 5820ecdc2..d33268296 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -67,192 +67,3 @@ class Test(test.TestCase):
self.assertEqual(result.body, "Router result")
result = webob.Request.blank('/bad').get_response(Router())
self.assertNotEqual(result.body, "Router result")
-
-
-class ControllerTest(test.TestCase):
-
- class TestRouter(wsgi.Router):
-
- class TestController(wsgi.Controller):
-
- _serialization_metadata = {
- 'application/xml': {
- "attributes": {
- "test": ["id"]}}}
-
- def show(self, req, id): # pylint: disable=W0622,C0103
- return {"test": {"id": id}}
-
- def __init__(self):
- mapper = routes.Mapper()
- mapper.resource("test", "tests", controller=self.TestController())
- wsgi.Router.__init__(self, mapper)
-
- def test_show(self):
- request = wsgi.Request.blank('/tests/123')
- result = request.get_response(self.TestRouter())
- self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
-
- def test_response_content_type_from_accept_xml(self):
- request = webob.Request.blank('/tests/123')
- request.headers["Accept"] = "application/xml"
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/xml")
-
- def test_response_content_type_from_accept_json(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/json"
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/json")
-
- def test_response_content_type_from_query_extension_xml(self):
- request = wsgi.Request.blank('/tests/123.xml')
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/xml")
-
- def test_response_content_type_from_query_extension_json(self):
- request = wsgi.Request.blank('/tests/123.json')
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.headers["Content-Type"], "application/json")
-
- def test_response_content_type_default_when_unsupported(self):
- request = wsgi.Request.blank('/tests/123.unsupported')
- request.headers["Accept"] = "application/unsupported1"
- result = request.get_response(self.TestRouter())
- self.assertEqual(result.status_int, 200)
- self.assertEqual(result.headers["Content-Type"], "application/json")
-
-
-class RequestTest(test.TestCase):
-
- def test_request_content_type_missing(self):
- request = wsgi.Request.blank('/tests/123')
- request.body = "<body />"
- self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
-
- def test_request_content_type_unsupported(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Content-Type"] = "text/html"
- request.body = "asdf<br />"
- self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
-
- def test_request_content_type_with_charset(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Content-Type"] = "application/json; charset=UTF-8"
- result = request.get_content_type()
- self.assertEqual(result, "application/json")
-
- def test_content_type_from_accept_xml(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/xml"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/xml, application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = \
- "application/json; q=0.3, application/xml; q=0.9"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- def test_content_type_from_query_extension(self):
- request = wsgi.Request.blank('/tests/123.xml')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- request = wsgi.Request.blank('/tests/123.json')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123.invalid')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- def test_content_type_accept_and_query_extension(self):
- request = wsgi.Request.blank('/tests/123.xml')
- request.headers["Accept"] = "application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- def test_content_type_accept_default(self):
- request = wsgi.Request.blank('/tests/123.unsupported')
- request.headers["Accept"] = "application/unsupported1"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
-
-class SerializerTest(test.TestCase):
-
- def test_xml(self):
- input_dict = dict(servers=dict(a=(2, 3)))
- expected_xml = '<servers><a>(2,3)</a></servers>'
- serializer = wsgi.Serializer()
- result = serializer.serialize(input_dict, "application/xml")
- result = result.replace('\n', '').replace(' ', '')
- self.assertEqual(result, expected_xml)
-
- def test_json(self):
- input_dict = dict(servers=dict(a=(2, 3)))
- expected_json = '{"servers":{"a":[2,3]}}'
- serializer = wsgi.Serializer()
- result = serializer.serialize(input_dict, "application/json")
- result = result.replace('\n', '').replace(' ', '')
- self.assertEqual(result, expected_json)
-
- def test_unsupported_content_type(self):
- serializer = wsgi.Serializer()
- self.assertRaises(exception.InvalidContentType, serializer.serialize,
- {}, "text/null")
-
- def test_deserialize_json(self):
- data = """{"a": {
- "a1": "1",
- "a2": "2",
- "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
- "d": {"e": "1"},
- "f": "1"}}"""
- as_dict = dict(a={
- 'a1': '1',
- 'a2': '2',
- 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
- 'd': {'e': '1'},
- 'f': '1'})
- metadata = {}
- serializer = wsgi.Serializer(metadata)
- self.assertEqual(serializer.deserialize(data, "application/json"),
- as_dict)
-
- def test_deserialize_xml(self):
- xml = """
- <a a1="1" a2="2">
- <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
- <d><e>1</e></d>
- <f>1</f>
- </a>
- """.strip()
- as_dict = dict(a={
- 'a1': '1',
- 'a2': '2',
- 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
- 'd': {'e': '1'},
- 'f': '1'})
- metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
- serializer = wsgi.Serializer(metadata)
- self.assertEqual(serializer.deserialize(xml, "application/xml"),
- as_dict)
-
- def test_deserialize_empty_xml(self):
- xml = """<a></a>"""
- as_dict = {"a": {}}
- serializer = wsgi.Serializer()
- self.assertEqual(serializer.deserialize(xml, "application/xml"),
- as_dict)
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 109905ded..6d108d494 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -34,7 +34,7 @@ class StubGlanceClient(object):
def get_image_meta(self, image_id):
return self.images[image_id]
- def get_images_detailed(self):
+ def get_images_detailed(self, filters=None):
return self.images.itervalues()
def get_image(self, image_id):
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
index 7e20c9b00..eb9a3056e 100644
--- a/nova/tests/integrated/api/client.py
+++ b/nova/tests/integrated/api/client.py
@@ -152,7 +152,10 @@ class TestOpenStackClient(object):
def _decode_json(self, response):
body = response.read()
LOG.debug(_("Decoding JSON: %s") % (body))
- return json.loads(body)
+ if body:
+ return json.loads(body)
+ else:
+ return ""
def api_get(self, relative_uri, **kwargs):
kwargs.setdefault('check_response_status', [200])
@@ -166,7 +169,7 @@ class TestOpenStackClient(object):
headers['Content-Type'] = 'application/json'
kwargs['body'] = json.dumps(body)
- kwargs.setdefault('check_response_status', [200])
+ kwargs.setdefault('check_response_status', [200, 202])
response = self.api_request(relative_uri, **kwargs)
return self._decode_json(response)
@@ -185,6 +188,9 @@ class TestOpenStackClient(object):
def post_server(self, server):
return self.api_post('/servers', server)['server']
+ def post_server_action(self, server_id, data):
+ return self.api_post('/servers/%s/action' % server_id, data)
+
def delete_server(self, server_id):
return self.api_delete('/servers/%s' % server_id)
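
The new post_server_action helper is what the rebuild tests below drive; a small hypothetical wrapper showing the request shape they send:

    def rebuild_server(api, server_id, image_ref, name, metadata=None):
        """Rebuild via /servers/<id>/action using the test client above."""
        body = {'rebuild': {'imageRef': image_ref, 'name': name}}
        if metadata is not None:
            body['rebuild']['metadata'] = metadata
        return api.post_server_action(server_id, body)
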
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index e89d0100a..fcb517cf5 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -179,6 +179,112 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# Cleanup
self._delete_server(created_server_id)
+ def test_create_and_rebuild_server(self):
+ """Rebuild a server."""
+
+ # create a server that initially has no metadata
+ server = self._build_minimal_create_server_request()
+ server_post = {'server': server}
+ created_server = self.api.post_server(server_post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # rebuild the server with a new image and name
+ post = {}
+ post['rebuild'] = {
+ "imageRef": "https://localhost/v1.1/32278/images/2",
+ "name": "blah",
+ }
+
+ self.api.post_server_action(created_server_id, post)
+ LOG.debug("rebuilt server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual({}, found_server.get('metadata'))
+ self.assertEqual('blah', found_server.get('name'))
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+ def test_create_and_rebuild_server_with_metadata(self):
+ """Rebuild a server with metadata."""
+
+ # create a server that initially has no metadata
+ server = self._build_minimal_create_server_request()
+ server_post = {'server': server}
+ created_server = self.api.post_server(server_post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # rebuild the server with metadata
+ post = {}
+ post['rebuild'] = {
+ "imageRef": "https://localhost/v1.1/32278/images/2",
+ "name": "blah",
+ }
+
+ metadata = {}
+ for i in range(30):
+ metadata['key_%s' % i] = 'value_%s' % i
+
+ post['rebuild']['metadata'] = metadata
+
+ self.api.post_server_action(created_server_id, post)
+ LOG.debug("rebuilt server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual(metadata, found_server.get('metadata'))
+ self.assertEqual('blah', found_server.get('name'))
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+ def test_create_and_rebuild_server_with_metadata_removal(self):
+ """Rebuild a server with metadata."""
+
+ # create a server that initially has metadata
+ server = self._build_minimal_create_server_request()
+ server_post = {'server': server}
+
+ metadata = {}
+ for i in range(30):
+ metadata['key_%s' % i] = 'value_%s' % i
+
+ server_post['server']['metadata'] = metadata
+
+ created_server = self.api.post_server(server_post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # rebuild the server with empty metadata
+ post = {}
+ post['rebuild'] = {
+ "imageRef": "https://localhost/v1.1/32278/images/2",
+ "name": "blah",
+ }
+
+ metadata = {}
+ post['rebuild']['metadata'] = metadata
+
+ self.api.post_server_action(created_server_id, post)
+ LOG.debug("rebuilt server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual(metadata, found_server.get('metadata'))
+ self.assertEqual('blah', found_server.get('name'))
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
if __name__ == "__main__":
unittest.main()
diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py
index 8a9754777..fde32f797 100644
--- a/nova/tests/integrated/test_xml.py
+++ b/nova/tests/integrated/test_xml.py
@@ -32,7 +32,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
""""Some basic XML sanity checks."""
def test_namespace_limits(self):
- """/limits should have v1.0 namespace (hasn't changed in 1.1)."""
+ """/limits should have v1.1 namespace (has changed in 1.1)."""
headers = {}
headers['Accept'] = 'application/xml'
@@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
data = response.read()
LOG.debug("data: %s" % data)
- prefix = '<limits xmlns="%s"' % common.XML_NS_V10
+ prefix = '<limits xmlns="%s"' % common.XML_NS_V11
self.assertTrue(data.startswith(prefix))
def test_namespace_servers(self):
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index b64be662e..55ea6be02 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -169,6 +169,25 @@ class CloudTestCase(test.TestCase):
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
+ def test_create_volume_from_snapshot(self):
+ """Makes sure create_volume works when we specify a snapshot."""
+ vol = db.volume_create(self.context, {'size': 1})
+ snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+ 'volume_size': vol['size'],
+ 'status': "available"})
+ snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+ result = self.cloud.create_volume(self.context,
+ snapshot_id=snapshot_id)
+ volume_id = result['volumeId']
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id)
+
+ db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id))
+ db.snapshot_destroy(self.context, snap['id'])
+ db.volume_destroy(self.context, vol['id'])
+
def test_describe_availability_zones(self):
"""Makes sure describe_availability_zones works and filters results."""
service1 = db.service_create(self.context, {'host': 'host1_zones',
@@ -186,6 +205,52 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
+ def test_describe_snapshots(self):
+ """Makes sure describe_snapshots works and filters results."""
+ vol = db.volume_create(self.context, {})
+ snap1 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+ snap2 = db.snapshot_create(self.context, {'volume_id': vol['id']})
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 2)
+ snapshot_id = ec2utils.id_to_ec2_id(snap2['id'], 'snap-%08x')
+ result = self.cloud.describe_snapshots(self.context,
+ snapshot_id=[snapshot_id])
+ self.assertEqual(len(result['snapshotSet']), 1)
+ self.assertEqual(
+ ec2utils.ec2_id_to_id(result['snapshotSet'][0]['snapshotId']),
+ snap2['id'])
+ db.snapshot_destroy(self.context, snap1['id'])
+ db.snapshot_destroy(self.context, snap2['id'])
+ db.volume_destroy(self.context, vol['id'])
+
+ def test_create_snapshot(self):
+ """Makes sure create_snapshot works."""
+ vol = db.volume_create(self.context, {'status': "available"})
+ volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x')
+
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=volume_id)
+ snapshot_id = result['snapshotId']
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 1)
+ self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+ db.snapshot_destroy(self.context, ec2utils.ec2_id_to_id(snapshot_id))
+ db.volume_destroy(self.context, vol['id'])
+
+ def test_delete_snapshot(self):
+ """Makes sure delete_snapshot works."""
+ vol = db.volume_create(self.context, {'status': "available"})
+ snap = db.snapshot_create(self.context, {'volume_id': vol['id'],
+ 'status': "available"})
+ snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x')
+
+ result = self.cloud.delete_snapshot(self.context,
+ snapshot_id=snapshot_id)
+ self.assertTrue(result)
+
+ db.volume_destroy(self.context, vol['id'])
+
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
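
The snapshot tests above use the same EC2 id templating as the volume tests; a worked example of the conversion they assume (id_to_ec2_id applies the printf-style template and ec2_id_to_id reverses it, as used throughout this file):

    from nova.api.ec2 import ec2utils

    ec2_id = ec2utils.id_to_ec2_id(5, 'snap-%08x')   # -> 'snap-00000005'
    assert ec2utils.ec2_id_to_id(ec2_id) == 5
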
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
index c029d41e6..3361c7b73 100644
--- a/nova/tests/test_host_filter.py
+++ b/nova/tests/test_host_filter.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Tests For Scheduler Host Filter Drivers.
+Tests For Scheduler Host Filters.
"""
import json
@@ -31,7 +31,7 @@ class FakeZoneManager:
class HostFilterTestCase(test.TestCase):
- """Test case for host filter drivers."""
+ """Test case for host filters."""
def _host_caps(self, multiplier):
# Returns host capabilities in the following way:
@@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase):
'host_name-label': 'xs-%s' % multiplier}
def setUp(self):
- self.old_flag = FLAGS.default_host_filter_driver
- FLAGS.default_host_filter_driver = \
+ self.old_flag = FLAGS.default_host_filter
+ FLAGS.default_host_filter = \
'nova.scheduler.host_filter.AllHostsFilter'
self.instance_type = dict(name='tiny',
memory_mb=50,
@@ -76,51 +76,52 @@ class HostFilterTestCase(test.TestCase):
self.zone_manager.service_states = states
def tearDown(self):
- FLAGS.default_host_filter_driver = self.old_flag
+ FLAGS.default_host_filter = self.old_flag
- def test_choose_driver(self):
- # Test default driver ...
- driver = host_filter.choose_driver()
- self.assertEquals(driver._full_name(),
+ def test_choose_filter(self):
+ # Test default filter ...
+ hf = host_filter.choose_host_filter()
+ self.assertEquals(hf._full_name(),
'nova.scheduler.host_filter.AllHostsFilter')
- # Test valid driver ...
- driver = host_filter.choose_driver(
- 'nova.scheduler.host_filter.FlavorFilter')
- self.assertEquals(driver._full_name(),
- 'nova.scheduler.host_filter.FlavorFilter')
- # Test invalid driver ...
+ # Test valid filter ...
+ hf = host_filter.choose_host_filter(
+ 'nova.scheduler.host_filter.InstanceTypeFilter')
+ self.assertEquals(hf._full_name(),
+ 'nova.scheduler.host_filter.InstanceTypeFilter')
+ # Test invalid filter ...
try:
- host_filter.choose_driver('does not exist')
- self.fail("Should not find driver")
- except exception.SchedulerHostFilterDriverNotFound:
+ host_filter.choose_host_filter('does not exist')
+ self.fail("Should not find host filter.")
+ except exception.SchedulerHostFilterNotFound:
pass
- def test_all_host_driver(self):
- driver = host_filter.AllHostsFilter()
- cooked = driver.instance_type_to_filter(self.instance_type)
- hosts = driver.filter_hosts(self.zone_manager, cooked)
+ def test_all_host_filter(self):
+ hf = host_filter.AllHostsFilter()
+ cooked = hf.instance_type_to_filter(self.instance_type)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(10, len(hosts))
for host, capabilities in hosts:
self.assertTrue(host.startswith('host'))
- def test_flavor_driver(self):
- driver = host_filter.FlavorFilter()
+ def test_instance_type_filter(self):
+ hf = host_filter.InstanceTypeFilter()
# filter all hosts that can support 50 ram and 500 disk
- name, cooked = driver.instance_type_to_filter(self.instance_type)
- self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
- hosts = driver.filter_hosts(self.zone_manager, cooked)
+ name, cooked = hf.instance_type_to_filter(self.instance_type)
+ self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+ name)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(6, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
self.assertEquals('host05', just_hosts[0])
self.assertEquals('host10', just_hosts[5])
- def test_json_driver(self):
- driver = host_filter.JsonFilter()
+ def test_json_filter(self):
+ hf = host_filter.JsonFilter()
# filter all hosts that can support 50 ram and 500 disk
- name, cooked = driver.instance_type_to_filter(self.instance_type)
+ name, cooked = hf.instance_type_to_filter(self.instance_type)
self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
- hosts = driver.filter_hosts(self.zone_manager, cooked)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(6, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
@@ -132,15 +133,16 @@ class HostFilterTestCase(test.TestCase):
raw = ['or',
['and',
['<', '$compute.host_memory_free', 30],
- ['<', '$compute.disk_available', 300]
+ ['<', '$compute.disk_available', 300],
],
['and',
['>', '$compute.host_memory_free', 70],
- ['>', '$compute.disk_available', 700]
- ]
+ ['>', '$compute.disk_available', 700],
+ ],
]
+
cooked = json.dumps(raw)
- hosts = driver.filter_hosts(self.zone_manager, cooked)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(5, len(hosts))
just_hosts = [host for host, caps in hosts]
@@ -152,7 +154,7 @@ class HostFilterTestCase(test.TestCase):
['=', '$compute.host_memory_free', 30],
]
cooked = json.dumps(raw)
- hosts = driver.filter_hosts(self.zone_manager, cooked)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(9, len(hosts))
just_hosts = [host for host, caps in hosts]
@@ -162,7 +164,7 @@ class HostFilterTestCase(test.TestCase):
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
cooked = json.dumps(raw)
- hosts = driver.filter_hosts(self.zone_manager, cooked)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
self.assertEquals(5, len(hosts))
just_hosts = [host for host, caps in hosts]
@@ -174,35 +176,30 @@ class HostFilterTestCase(test.TestCase):
raw = ['unknown command', ]
cooked = json.dumps(raw)
try:
- driver.filter_hosts(self.zone_manager, cooked)
+ hf.filter_hosts(self.zone_manager, cooked)
self.fail("Should give KeyError")
except KeyError, e:
pass
- self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
- self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
- self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
- ['not', True, False, True, False]
- )))
+ self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
+ self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
+ self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
+ ['not', True, False, True, False])))
try:
- driver.filter_hosts(self.zone_manager, json.dumps(
- 'not', True, False, True, False
- ))
+ hf.filter_hosts(self.zone_manager, json.dumps(
+ 'not', True, False, True, False))
self.fail("Should give KeyError")
except KeyError, e:
pass
- self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
- ['=', '$foo', 100]
- )))
- self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
- ['=', '$.....', 100]
- )))
- self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
- ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
- )))
-
- self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
- ['=', {}, ['>', '$missing....foo']]
- )))
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(['=', '$foo', 100])))
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(['=', '$.....', 100])))
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(
+ ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
+
+ self.assertFalse(hf.filter_hosts(self.zone_manager,
+ json.dumps(['=', {}, ['>', '$missing....foo']])))
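
The JSON queries exercised above are just nested lists run through json.dumps; building one by hand looks like this ($compute.<key> refers to the capability dicts in the ZoneManager's service_states, and the numeric thresholds here are illustrative):

    import json

    raw = ['and',
           ['>=', '$compute.host_memory_free', 50],
           ['>=', '$compute.disk_available', 500]]
    cooked = json.dumps(raw)
    # hosts = hf.filter_hosts(zone_manager, cooked)   # as in test_json_filter
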
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 4efdd6ae9..1fac4e4e6 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -18,6 +18,7 @@ import eventlet
import mox
import os
import re
+import shutil
import sys
from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -645,6 +646,8 @@ class LibvirtConnTestCase(test.TestCase):
except Exception, e:
count = (0 <= str(e.message).find('Unexpected method call'))
+ shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name))
+
self.assertTrue(count)
def test_get_host_ip_addr(self):
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 916fca55e..ad73a3a69 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -250,6 +250,7 @@ class QuotaTestCase(test.TestCase):
volume.API().create,
self.context,
size=10,
+ snapshot_id=None,
name='',
description='')
for volume_id in volume_ids:
@@ -263,6 +264,7 @@ class QuotaTestCase(test.TestCase):
volume.API().create,
self.context,
size=10,
+ snapshot_id=None,
name='',
description='')
for volume_id in volume_ids:
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index 236d12434..4f10ee6af 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase):
self.context = context.get_admin_context()
@staticmethod
- def _create_volume(size='0'):
+ def _create_volume(size='0', snapshot_id=None):
"""Create a volume object."""
vol = {}
vol['size'] = size
+ vol['snapshot_id'] = snapshot_id
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['availability_zone'] = FLAGS.storage_availability_zone
@@ -69,6 +70,25 @@ class VolumeTestCase(test.TestCase):
self.context,
volume_id)
+ def test_create_volume_from_snapshot(self):
+ """Test volume can be created from a snapshot."""
+ volume_src_id = self._create_volume()
+ self.volume.create_volume(self.context, volume_src_id)
+ snapshot_id = self._create_snapshot(volume_src_id)
+ self.volume.create_snapshot(self.context, volume_src_id, snapshot_id)
+ volume_dst_id = self._create_volume(0, snapshot_id)
+ self.volume.create_volume(self.context, volume_dst_id, snapshot_id)
+ self.assertEqual(volume_dst_id, db.volume_get(
+ context.get_admin_context(),
+ volume_dst_id).id)
+ self.assertEqual(snapshot_id, db.volume_get(
+ context.get_admin_context(),
+ volume_dst_id).snapshot_id)
+
+ self.volume.delete_volume(self.context, volume_dst_id)
+ self.volume.delete_snapshot(self.context, snapshot_id)
+ self.volume.delete_volume(self.context, volume_src_id)
+
def test_too_big_volume(self):
"""Ensure failure if a too large of a volume is requested."""
# FIXME(vish): validation needs to move into the data layer in
@@ -176,6 +196,34 @@ class VolumeTestCase(test.TestCase):
# This will allow us to test cross-node interactions
pass
+ @staticmethod
+ def _create_snapshot(volume_id, size='0'):
+ """Create a snapshot object."""
+ snap = {}
+ snap['volume_size'] = size
+ snap['user_id'] = 'fake'
+ snap['project_id'] = 'fake'
+ snap['volume_id'] = volume_id
+ snap['status'] = "creating"
+ return db.snapshot_create(context.get_admin_context(), snap)['id']
+
+ def test_create_delete_snapshot(self):
+ """Test snapshot can be created and deleted."""
+ volume_id = self._create_volume()
+ self.volume.create_volume(self.context, volume_id)
+ snapshot_id = self._create_snapshot(volume_id)
+ self.volume.create_snapshot(self.context, volume_id, snapshot_id)
+ self.assertEqual(snapshot_id,
+ db.snapshot_get(context.get_admin_context(),
+ snapshot_id).id)
+
+ self.volume.delete_snapshot(self.context, snapshot_id)
+ self.assertRaises(exception.NotFound,
+ db.snapshot_get,
+ self.context,
+ snapshot_id)
+ self.volume.delete_volume(self.context, volume_id)
+
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py
index fdcde34c9..72b74be20 100644
--- a/nova/tests/test_zone_aware_scheduler.py
+++ b/nova/tests/test_zone_aware_scheduler.py
@@ -38,16 +38,16 @@ class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
class FakeZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {
- 'host1': {
- 'compute': {'ram': 1000}
- },
- 'host2': {
- 'compute': {'ram': 2000}
- },
- 'host3': {
- 'compute': {'ram': 3000}
- }
- }
+ 'host1': {
+ 'compute': {'ram': 1000},
+ },
+ 'host2': {
+ 'compute': {'ram': 2000},
+ },
+ 'host3': {
+ 'compute': {'ram': 3000},
+ },
+ }
class FakeEmptyZoneManager(zone_manager.ZoneManager):
@@ -116,4 +116,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
sched.set_zone_manager(zm)
fake_context = {}
- self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {})
+ self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
+ fake_context, 1,
+ dict(host_filter=None, instance_type={}))
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 5ac376e46..0225797d7 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -82,6 +82,21 @@ class FakeConnection(driver.ComputeDriver):
def __init__(self):
self.instances = {}
+ self.host_status = {
+ 'host_name-description': 'Fake Host',
+ 'host_hostname': 'fake-mini',
+ 'host_memory_total': 8000000000,
+ 'host_memory_overhead': 10000000,
+ 'host_memory_free': 7900000000,
+ 'host_memory_free_computed': 7900000000,
+ 'host_other_config': {},
+ 'host_ip_address': '192.168.1.109',
+ 'host_cpu_info': {},
+ 'disk_available': 500000000000,
+ 'disk_total': 600000000000,
+ 'disk_used': 100000000000,
+ 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
+ 'host_name_label': 'fake-mini'}
@classmethod
def instance(cls):
@@ -456,3 +471,11 @@ class FakeConnection(driver.ComputeDriver):
def test_remove_vm(self, instance_name):
""" Removes the named VM, as if it crashed. For testing"""
self.instances.pop(instance_name)
+
+ def update_host_status(self):
+ """Return fake Host Status of ram, disk, network."""
+ return self.host_status
+
+ def get_host_stats(self, refresh=False):
+ """Return fake Host Status of ram, disk, network."""
+ return self.host_status
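
A minimal usage sketch of the new fake-driver host status, assuming the usual singleton accessor behaves as elsewhere in nova; values shown are just the static ones from the dict above:

    from nova.virt import fake

    conn = fake.FakeConnection.instance()
    stats = conn.get_host_stats(refresh=True)
    # e.g. 7900000000 and 500000000000 from the static host_status dict
    print stats['host_memory_free'], stats['disk_available']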
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index de2497a76..20986d4d5 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -116,7 +116,7 @@
</serial>
#if $getVar('vncserver_host', False)
- <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='${vncserver_host}'/>
+ <graphics type='vnc' port='-1' autoport='yes' keymap='${vnc_keymap}' listen='${vncserver_host}'/>
#end if
</devices>
</domain>
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 94a703954..f563d0681 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -488,19 +488,27 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def pause(self, instance, callback):
- raise exception.ApiError("pause not supported for libvirt.")
+ """Pause VM instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.suspend()
@exception.wrap_exception
def unpause(self, instance, callback):
- raise exception.ApiError("unpause not supported for libvirt.")
+ """Unpause paused VM instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.resume()
@exception.wrap_exception
def suspend(self, instance, callback):
- raise exception.ApiError("suspend not supported for libvirt")
+ """Suspend the specified instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.managedSave(0)
@exception.wrap_exception
def resume(self, instance, callback):
- raise exception.ApiError("resume not supported for libvirt")
+ """resume the specified instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.create()
@exception.wrap_exception
def rescue(self, instance):
@@ -962,6 +970,7 @@ class LibvirtConnection(driver.ComputeDriver):
if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc':
xml_info['vncserver_host'] = FLAGS.vncserver_host
+ xml_info['vnc_keymap'] = FLAGS.vnc_keymap
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index c3e79a92f..6d7149841 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -590,11 +590,11 @@ class VMWareVMOps(object):
def pause(self, instance, callback):
"""Pause a VM instance."""
- raise exception.APIError("pause not supported for vmwareapi")
+ raise exception.ApiError("pause not supported for vmwareapi")
def unpause(self, instance, callback):
"""Un-Pause a VM instance."""
- raise exception.APIError("unpause not supported for vmwareapi")
+ raise exception.ApiError("unpause not supported for vmwareapi")
def suspend(self, instance, callback):
"""Suspend the specified instance."""
@@ -673,7 +673,7 @@ class VMWareVMOps(object):
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
- raise exception.APIError("get_diagnostics not implemented for "
+ raise exception.ApiError("get_diagnostics not implemented for "
"vmwareapi")
def get_console_output(self, instance):
diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py
index b5b00e44e..859bfd65f 100644
--- a/nova/vnc/__init__.py
+++ b/nova/vnc/__init__.py
@@ -32,3 +32,5 @@ flags.DEFINE_string('vncserver_host', '0.0.0.0',
'the host interface on which vnc server should listen')
flags.DEFINE_bool('vnc_enabled', True,
'enable vnc related features')
+flags.DEFINE_string('vnc_keymap', 'en-us',
+ 'keymap for vnc')
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 09befb647..5804955f7 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -39,7 +39,14 @@ LOG = logging.getLogger('nova.volume')
class API(base.Base):
"""API for interacting with the volume manager."""
- def create(self, context, size, name, description):
+ def create(self, context, size, snapshot_id, name, description):
+ if snapshot_id is not None:
+ snapshot = self.get_snapshot(context, snapshot_id)
+ if snapshot['status'] != "available":
+ raise exception.ApiError(
+ _("Snapshot status must be available"))
+ size = snapshot['volume_size']
+
if quota.allowed_volumes(context, 1, size) < 1:
pid = context.project_id
LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
@@ -51,6 +58,7 @@ class API(base.Base):
'size': size,
'user_id': context.user_id,
'project_id': context.project_id,
+ 'snapshot_id': snapshot_id,
'availability_zone': FLAGS.storage_availability_zone,
'status': "creating",
'attach_status': "detached",
@@ -62,7 +70,8 @@ class API(base.Base):
FLAGS.scheduler_topic,
{"method": "create_volume",
"args": {"topic": FLAGS.volume_topic,
- "volume_id": volume['id']}})
+ "volume_id": volume['id'],
+ "snapshot_id": snapshot_id}})
return volume
def delete(self, context, volume_id):
@@ -90,6 +99,15 @@ class API(base.Base):
return self.db.volume_get_all(context)
return self.db.volume_get_all_by_project(context, context.project_id)
+ def get_snapshot(self, context, snapshot_id):
+ rv = self.db.snapshot_get(context, snapshot_id)
+ return dict(rv.iteritems())
+
+ def get_all_snapshots(self, context):
+ if context.is_admin:
+ return self.db.snapshot_get_all(context)
+ return self.db.snapshot_get_all_by_project(context, context.project_id)
+
def check_attach(self, context, volume_id):
volume = self.get(context, volume_id)
# TODO(vish): abstract status checking?
@@ -110,3 +128,38 @@ class API(base.Base):
self.db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": "remove_volume",
"args": {'volume_id': volume_id}})
+
+ def create_snapshot(self, context, volume_id, name, description):
+ volume = self.get(context, volume_id)
+ if volume['status'] != "available":
+ raise exception.ApiError(_("Volume status must be available"))
+
+ options = {
+ 'volume_id': volume_id,
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'status': "creating",
+ 'progress': '0%',
+ 'volume_size': volume['size'],
+ 'display_name': name,
+ 'display_description': description}
+
+ snapshot = self.db.snapshot_create(context, options)
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "create_snapshot",
+ "args": {"topic": FLAGS.volume_topic,
+ "volume_id": volume_id,
+ "snapshot_id": snapshot['id']}})
+ return snapshot
+
+ def delete_snapshot(self, context, snapshot_id):
+ snapshot = self.get_snapshot(context, snapshot_id)
+ if snapshot['status'] != "available":
+ raise exception.ApiError(_("Snapshot status must be available"))
+ self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'})
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "delete_snapshot",
+ "args": {"topic": FLAGS.volume_topic,
+ "snapshot_id": snapshot_id}})
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 55307ad9b..87e13277f 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -90,42 +90,97 @@ class VolumeDriver(object):
raise exception.Error(_("volume group %s doesn't exist")
% FLAGS.volume_group)
- def create_volume(self, volume):
- """Creates a logical volume. Can optionally return a Dictionary of
- changes to the volume object to be persisted."""
- if int(volume['size']) == 0:
- sizestr = '100M'
- else:
- sizestr = '%sG' % volume['size']
+ def _create_volume(self, volume_name, sizestr):
self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n',
- volume['name'],
- FLAGS.volume_group)
+ volume_name, FLAGS.volume_group)
- def delete_volume(self, volume):
- """Deletes a logical volume."""
+ def _copy_volume(self, srcstr, deststr, size_in_g):
+ self._execute('sudo', 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,
+ 'count=%d' % (size_in_g * 1024), 'bs=1M')
+
+ def _volume_not_present(self, volume_name):
+ path_name = '%s/%s' % (FLAGS.volume_group, volume_name)
try:
- self._try_execute('sudo', 'lvdisplay',
- '%s/%s' %
- (FLAGS.volume_group,
- volume['name']))
+ self._try_execute('sudo', 'lvdisplay', path_name)
except Exception as e:
- # If the volume isn't present, then don't attempt to delete
+ # If lvdisplay fails, the volume isn't present
return True
+ return False
+ def _delete_volume(self, volume, size_in_g):
+ """Deletes a logical volume."""
# zero out old volumes to prevent data leaking between users
# TODO(ja): reclaiming space should be done lazy and low priority
- self._execute('sudo', 'dd', 'if=/dev/zero',
- 'of=%s' % self.local_path(volume),
- 'count=%d' % (volume['size'] * 1024),
- 'bs=1M')
+ self._copy_volume('/dev/zero', self.local_path(volume), size_in_g)
self._try_execute('sudo', 'lvremove', '-f', "%s/%s" %
(FLAGS.volume_group,
- volume['name']))
+ self._escape_snapshot(volume['name'])))
+
+ def _sizestr(self, size_in_g):
+ if int(size_in_g) == 0:
+ return '100M'
+ return '%sG' % size_in_g
+
+ # Linux LVM reserves names that start with 'snapshot', so such
+ # volume names can't be created directly. Mangle them.
+ def _escape_snapshot(self, snapshot_name):
+ if not snapshot_name.startswith('snapshot'):
+ return snapshot_name
+ return '_' + snapshot_name
+
+ def create_volume(self, volume):
+ """Creates a logical volume. Can optionally return a Dictionary of
+ changes to the volume object to be persisted."""
+ self._create_volume(volume['name'], self._sizestr(volume['size']))
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a volume from a snapshot."""
+ self._create_volume(volume['name'], self._sizestr(volume['size']))
+ self._copy_volume(self.local_path(snapshot), self.local_path(volume),
+ snapshot['volume_size'])
+
+ def delete_volume(self, volume):
+ """Deletes a logical volume."""
+ if self._volume_not_present(volume['name']):
+ # If the volume isn't present, then don't attempt to delete
+ return True
+
+ # TODO(yamahata): lvm can't delete origin volume only without
+ # deleting derived snapshots. Can we do something fancy?
+ out, err = self._execute('sudo', 'lvdisplay', '--noheading',
+ '-C', '-o', 'Attr',
+ '%s/%s' % (FLAGS.volume_group,
+ volume['name']))
+ # fake_execute returns None for stdout in the unit tests, so guard against it
+ if out:
+ out = out.strip()
+ if (out[0] == 'o') or (out[0] == 'O'):
+ raise exception.VolumeIsBusy(volume_name=volume['name'])
+
+ self._delete_volume(volume, volume['size'])
+
+ def create_snapshot(self, snapshot):
+ """Creates a snapshot."""
+ orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
+ self._try_execute('sudo', 'lvcreate', '-L',
+ self._sizestr(snapshot['volume_size']),
+ '--name', self._escape_snapshot(snapshot['name']),
+ '--snapshot', orig_lv_name)
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a snapshot."""
+ if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
+ # If the snapshot isn't present, then don't attempt to delete
+ return True
+
+ # TODO(yamahata): zeroing out the whole snapshot triggers COW.
+ # it's quite slow.
+ self._delete_volume(snapshot, snapshot['volume_size'])
def local_path(self, volume):
# NOTE(vish): stops deprecation warning
escaped_group = FLAGS.volume_group.replace('-', '--')
- escaped_name = volume['name'].replace('-', '--')
+ escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def ensure_export(self, context, volume):
@@ -559,6 +614,18 @@ class RBDDriver(VolumeDriver):
self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
'rm', volume['name'])
+ def create_snapshot(self, snapshot):
+ """Creates an rbd snapshot"""
+ self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+ 'snap', 'create', '--snap', snapshot['name'],
+ snapshot['volume_name'])
+
+ def delete_snapshot(self, snapshot):
+ """Deletes an rbd snapshot"""
+ self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+ 'snap', 'rm', '--snap', snapshot['name'],
+ snapshot['volume_name'])
+
def local_path(self, volume):
"""Returns the path of the rbd volume."""
# This is the same as the remote path
@@ -600,18 +667,31 @@ class SheepdogDriver(VolumeDriver):
def create_volume(self, volume):
"""Creates a sheepdog volume"""
- if int(volume['size']) == 0:
- sizestr = '100M'
- else:
- sizestr = '%sG' % volume['size']
self._try_execute('qemu-img', 'create',
"sheepdog:%s" % volume['name'],
- sizestr)
+ self._sizestr(volume['size']))
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Creates a sheepdog volume from a snapshot."""
+ self._try_execute('qemu-img', 'create', '-b',
+ "sheepdog:%s:%s" % (snapshot['volume_name'],
+ snapshot['name']),
+ "sheepdog:%s" % volume['name'])
def delete_volume(self, volume):
"""Deletes a logical volume"""
self._try_execute('collie', 'vdi', 'delete', volume['name'])
+ def create_snapshot(self, snapshot):
+ """Creates a sheepdog snapshot"""
+ self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
+ "sheepdog:%s" % snapshot['volume_name'])
+
+ def delete_snapshot(self, snapshot):
+ """Deletes a sheepdog snapshot"""
+ self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
+ '-s', snapshot['name'])
+
def local_path(self, volume):
return "sheepdog:%s" % volume['name']
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 2178389ce..ff53f0701 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -90,7 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager):
else:
LOG.info(_("volume %s: skipping export"), volume['name'])
- def create_volume(self, context, volume_id):
+ def create_volume(self, context, volume_id, snapshot_id=None):
"""Creates and exports the volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
@@ -108,7 +108,13 @@ class VolumeManager(manager.SchedulerDependentManager):
vol_size = volume_ref['size']
LOG.debug(_("volume %(vol_name)s: creating lv of"
" size %(vol_size)sG") % locals())
- model_update = self.driver.create_volume(volume_ref)
+ if snapshot_id is None:
+ model_update = self.driver.create_volume(volume_ref)
+ else:
+ snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+ model_update = self.driver.create_volume_from_snapshot(
+ volume_ref,
+ snapshot_ref)
if model_update:
self.db.volume_update(context, volume_ref['id'], model_update)
@@ -142,6 +148,12 @@ class VolumeManager(manager.SchedulerDependentManager):
self.driver.remove_export(context, volume_ref)
LOG.debug(_("volume %s: deleting"), volume_ref['name'])
self.driver.delete_volume(volume_ref)
+ except exception.VolumeIsBusy, e:
+ LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
+ self.driver.ensure_export(context, volume_ref)
+ self.db.volume_update(context, volume_ref['id'],
+ {'status': 'available'})
+ return True
except Exception:
self.db.volume_update(context,
volume_ref['id'],
@@ -152,6 +164,49 @@ class VolumeManager(manager.SchedulerDependentManager):
LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
return True
+ def create_snapshot(self, context, volume_id, snapshot_id):
+ """Creates and exports the snapshot."""
+ context = context.elevated()
+ snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+ LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])
+
+ try:
+ snap_name = snapshot_ref['name']
+ LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
+ model_update = self.driver.create_snapshot(snapshot_ref)
+ if model_update:
+ self.db.snapshot_update(context, snapshot_ref['id'],
+ model_update)
+
+ except Exception:
+ self.db.snapshot_update(context,
+ snapshot_ref['id'], {'status': 'error'})
+ raise
+
+ self.db.snapshot_update(context,
+ snapshot_ref['id'], {'status': 'available',
+ 'progress': '100%'})
+ LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
+ return snapshot_id
+
+ def delete_snapshot(self, context, snapshot_id):
+ """Deletes and unexports snapshot."""
+ context = context.elevated()
+ snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+
+ try:
+ LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
+ self.driver.delete_snapshot(snapshot_ref)
+ except Exception:
+ self.db.snapshot_update(context,
+ snapshot_ref['id'],
+ {'status': 'error_deleting'})
+ raise
+
+ self.db.snapshot_destroy(context, snapshot_id)
+ LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
+ return True
+
def setup_compute_volume(self, context, volume_id):
"""Setup remote volume on compute host.
diff --git a/nova/wsgi.py b/nova/wsgi.py
index ea9bb963d..33ba852bc 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -85,36 +85,7 @@ class Server(object):
class Request(webob.Request):
-
- def best_match_content_type(self):
- """Determine the most acceptable content-type.
-
- Based on the query extension then the Accept header.
-
- """
- parts = self.path.rsplit('.', 1)
-
- if len(parts) > 1:
- format = parts[1]
- if format in ['json', 'xml']:
- return 'application/{0}'.format(parts[1])
-
- ctypes = ['application/json', 'application/xml']
- bm = self.accept.best_match(ctypes)
-
- return bm or 'application/json'
-
- def get_content_type(self):
- allowed_types = ("application/xml", "application/json")
- if not "Content-Type" in self.headers:
- msg = _("Missing Content-Type")
- LOG.debug(msg)
- raise webob.exc.HTTPBadRequest(msg)
- type = self.content_type
- if type in allowed_types:
- return type
- LOG.debug(_("Wrong Content-Type: %s") % type)
- raise webob.exc.HTTPBadRequest("Invalid content type")
+ pass
class Application(object):
@@ -289,8 +260,8 @@ class Router(object):
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
- well and have your controller be a wsgi.Controller, who will route
- the request to the action method.
+ well and have your controller be an object that can route
+ the request to the action-specific method.
Examples:
mapper = routes.Mapper()
@@ -338,223 +309,6 @@ class Router(object):
return app
-class Controller(object):
- """WSGI app that dispatched to methods.
-
- WSGI app that reads routing information supplied by RoutesMiddleware
- and calls the requested action method upon itself. All action methods
- must, in addition to their normal parameters, accept a 'req' argument
- which is the incoming wsgi.Request. They raise a webob.exc exception,
- or return a dict which will be serialized by requested content type.
-
- """
-
- @webob.dec.wsgify(RequestClass=Request)
- def __call__(self, req):
- """Call the method specified in req.environ by RoutesMiddleware."""
- arg_dict = req.environ['wsgiorg.routing_args'][1]
- action = arg_dict['action']
- method = getattr(self, action)
- LOG.debug("%s %s" % (req.method, req.url))
- del arg_dict['controller']
- del arg_dict['action']
- if 'format' in arg_dict:
- del arg_dict['format']
- arg_dict['req'] = req
- result = method(**arg_dict)
-
- if type(result) is dict:
- content_type = req.best_match_content_type()
- default_xmlns = self.get_default_xmlns(req)
- body = self._serialize(result, content_type, default_xmlns)
-
- response = webob.Response()
- response.headers['Content-Type'] = content_type
- response.body = body
- msg_dict = dict(url=req.url, status=response.status_int)
- msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
- LOG.debug(msg)
- return response
- else:
- return result
-
- def _serialize(self, data, content_type, default_xmlns):
- """Serialize the given dict to the provided content_type.
-
- Uses self._serialization_metadata if it exists, which is a dict mapping
- MIME types to information needed to serialize to that type.
-
- """
- _metadata = getattr(type(self), '_serialization_metadata', {})
-
- serializer = Serializer(_metadata, default_xmlns)
- try:
- return serializer.serialize(data, content_type)
- except exception.InvalidContentType:
- raise webob.exc.HTTPNotAcceptable()
-
- def _deserialize(self, data, content_type):
- """Deserialize the request body to the specefied content type.
-
- Uses self._serialization_metadata if it exists, which is a dict mapping
- MIME types to information needed to serialize to that type.
-
- """
- _metadata = getattr(type(self), '_serialization_metadata', {})
- serializer = Serializer(_metadata)
- return serializer.deserialize(data, content_type)
-
- def get_default_xmlns(self, req):
- """Provide the XML namespace to use if none is otherwise specified."""
- return None
-
-
-class Serializer(object):
- """Serializes and deserializes dictionaries to certain MIME types."""
-
- def __init__(self, metadata=None, default_xmlns=None):
- """Create a serializer based on the given WSGI environment.
-
- 'metadata' is an optional dict mapping MIME types to information
- needed to serialize a dictionary to that type.
-
- """
- self.metadata = metadata or {}
- self.default_xmlns = default_xmlns
-
- def _get_serialize_handler(self, content_type):
- handlers = {
- 'application/json': self._to_json,
- 'application/xml': self._to_xml,
- }
-
- try:
- return handlers[content_type]
- except Exception:
- raise exception.InvalidContentType(content_type=content_type)
-
- def serialize(self, data, content_type):
- """Serialize a dictionary into the specified content type."""
- return self._get_serialize_handler(content_type)(data)
-
- def deserialize(self, datastring, content_type):
- """Deserialize a string to a dictionary.
-
- The string must be in the format of a supported MIME type.
-
- """
- return self.get_deserialize_handler(content_type)(datastring)
-
- def get_deserialize_handler(self, content_type):
- handlers = {
- 'application/json': self._from_json,
- 'application/xml': self._from_xml,
- }
-
- try:
- return handlers[content_type]
- except Exception:
- raise exception.InvalidContentType(content_type=content_type)
-
- def _from_json(self, datastring):
- return utils.loads(datastring)
-
- def _from_xml(self, datastring):
- xmldata = self.metadata.get('application/xml', {})
- plurals = set(xmldata.get('plurals', {}))
- node = minidom.parseString(datastring).childNodes[0]
- return {node.nodeName: self._from_xml_node(node, plurals)}
-
- def _from_xml_node(self, node, listnames):
- """Convert a minidom node to a simple Python type.
-
- listnames is a collection of names of XML nodes whose subnodes should
- be considered list items.
-
- """
- if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
- return node.childNodes[0].nodeValue
- elif node.nodeName in listnames:
- return [self._from_xml_node(n, listnames) for n in node.childNodes]
- else:
- result = dict()
- for attr in node.attributes.keys():
- result[attr] = node.attributes[attr].nodeValue
- for child in node.childNodes:
- if child.nodeType != node.TEXT_NODE:
- result[child.nodeName] = self._from_xml_node(child,
- listnames)
- return result
-
- def _to_json(self, data):
- return utils.dumps(data)
-
- def _to_xml(self, data):
- metadata = self.metadata.get('application/xml', {})
- # We expect data to contain a single key which is the XML root.
- root_key = data.keys()[0]
- doc = minidom.Document()
- node = self._to_xml_node(doc, metadata, root_key, data[root_key])
-
- xmlns = node.getAttribute('xmlns')
- if not xmlns and self.default_xmlns:
- node.setAttribute('xmlns', self.default_xmlns)
-
- return node.toprettyxml(indent=' ')
-
- def _to_xml_node(self, doc, metadata, nodename, data):
- """Recursive method to convert data members to XML nodes."""
- result = doc.createElement(nodename)
-
- # Set the xml namespace if one is specified
- # TODO(justinsb): We could also use prefixes on the keys
- xmlns = metadata.get('xmlns', None)
- if xmlns:
- result.setAttribute('xmlns', xmlns)
-
- if type(data) is list:
- collections = metadata.get('list_collections', {})
- if nodename in collections:
- metadata = collections[nodename]
- for item in data:
- node = doc.createElement(metadata['item_name'])
- node.setAttribute(metadata['item_key'], str(item))
- result.appendChild(node)
- return result
- singular = metadata.get('plurals', {}).get(nodename, None)
- if singular is None:
- if nodename.endswith('s'):
- singular = nodename[:-1]
- else:
- singular = 'item'
- for item in data:
- node = self._to_xml_node(doc, metadata, singular, item)
- result.appendChild(node)
- elif type(data) is dict:
- collections = metadata.get('dict_collections', {})
- if nodename in collections:
- metadata = collections[nodename]
- for k, v in data.items():
- node = doc.createElement(metadata['item_name'])
- node.setAttribute(metadata['item_key'], str(k))
- text = doc.createTextNode(str(v))
- node.appendChild(text)
- result.appendChild(node)
- return result
- attrs = metadata.get('attributes', {}).get(nodename, {})
- for k, v in data.items():
- if k in attrs:
- result.setAttribute(k, str(v))
- else:
- node = self._to_xml_node(doc, metadata, k, v)
- result.appendChild(node)
- else:
- # Type is atom
- node = doc.createTextNode(str(data))
- result.appendChild(node)
- return result
-
-
def paste_config_file(basename):
"""Find the best location in the system for a paste config file.
diff --git a/plugins/xenserver/networking/etc/init.d/openvswitch-nova b/plugins/xenserver/networking/etc/init.d/openvswitch-nova
new file mode 100755
index 000000000..8672a69b8
--- /dev/null
+++ b/plugins/xenserver/networking/etc/init.d/openvswitch-nova
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# openvswitch-nova
+#
+# chkconfig: 2345 96 89
+# description: Apply initial OVS flows for Nova
+
+# Copyright 2011 OpenStack LLC.
+# Copyright (C) 2009, 2010, 2011 Nicira Networks, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# source function library
+if [ -f /etc/init.d/functions ]; then
+ . /etc/init.d/functions
+elif [ -f /etc/rc.d/init.d/functions ]; then
+ . /etc/rc.d/init.d/functions
+elif [ -f /lib/lsb/init-functions ]; then
+ . /lib/lsb/init-functions
+else
+ echo "$0: missing LSB shell function library" >&2
+ exit 1
+fi
+
+OVS_CONFIGURE_BASE_FLOWS=/etc/xensource/scripts/ovs_configure_base_flows.py
+
+if test -e /etc/sysconfig/openvswitch-nova; then
+ . /etc/sysconfig/openvswitch-nova
+else
+ echo "$0: missing configuration file: /etc/sysconfig/openvswitch-nova"
+ exit 1
+fi
+
+if test -e /etc/xensource/network.conf; then
+ NETWORK_MODE=$(cat /etc/xensource/network.conf)
+fi
+
+case ${NETWORK_MODE:=openvswitch} in
+ vswitch|openvswitch)
+ ;;
+ bridge)
+ exit 0
+ ;;
+ *)
+ echo "Open vSwitch disabled (/etc/xensource/network.conf is invalid)" >&2
+ exit 0
+ ;;
+esac
+
+function run_ovs_conf_base_flows {
+ # expected format: DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1"
+ for pair in $DEVICE_BRIDGES; do
+ # below in $info, physical device is [0], bridge name is [1]
+ info=${pair//:/ }
+ /usr/bin/python $OVS_CONFIGURE_BASE_FLOWS $1 ${info[0]} ${info[1]}
+ done
+}
+
+function start {
+ run_ovs_conf_base_flows online
+}
+
+function stop {
+ run_ovs_conf_base_flows offline
+}
+
+function restart {
+ run_ovs_conf_base_flows reset
+}
+
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ restart
+ ;;
+ *)
+ echo "usage: openvswitch-nova [start|stop|restart]"
+ exit 1
+ ;;
+esac
diff --git a/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova b/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova
new file mode 100644
index 000000000..829782fb6
--- /dev/null
+++ b/plugins/xenserver/networking/etc/sysconfig/openvswitch-nova
@@ -0,0 +1 @@
+#DEVICE_BRIDGES="eth0:xenbr0 eth1:xenbr1"
diff --git a/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules b/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules
new file mode 100644
index 000000000..b179f0847
--- /dev/null
+++ b/plugins/xenserver/networking/etc/udev/rules.d/xen-openvswitch-nova.rules
@@ -0,0 +1,3 @@
+SUBSYSTEM=="xen-backend", KERNEL=="vif*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all"
+# is this one needed?
+#SUBSYSTEM=="net", KERNEL=="tap*", RUN+="/etc/xensource/scripts/ovs_configure_vif_flows.py $env{ACTION} %k all"
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/novalib.py b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
new file mode 100644
index 000000000..dcbee3ded
--- /dev/null
+++ b/plugins/xenserver/networking/etc/xensource/scripts/novalib.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import subprocess
+
+
+def execute_get_output(*command):
+ """Execute and return stdout"""
+ devnull = open(os.devnull, 'w')
+ command = map(str, command)
+ proc = subprocess.Popen(command, close_fds=True,
+ stdout=subprocess.PIPE, stderr=devnull)
+ devnull.close()
+ return proc.stdout.read().strip()
+
+
+def execute(*command):
+ """Execute without returning stdout"""
+ devnull = open(os.devnull, 'w')
+ command = map(str, command)
+ proc = subprocess.Popen(command, close_fds=True,
+ stdout=subprocess.PIPE, stderr=devnull)
+ devnull.close()
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py
new file mode 100755
index 000000000..514a43a2d
--- /dev/null
+++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_base_flows.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+This script is used to configure base openvswitch flows for XenServer hosts.
+"""
+
+import os
+import sys
+
+
+from novalib import execute, execute_get_output
+
+
+def main(command, phys_dev_name, bridge_name):
+ ovs_ofctl = lambda *rule: execute('/usr/bin/ovs-ofctl', *rule)
+
+ # always clear all flows first
+ ovs_ofctl('del-flows', bridge_name)
+
+ if command in ('online', 'reset'):
+ pnic_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
+ 'Interface', phys_dev_name, 'ofport')
+
+ # these flows are lower priority than all VM-specific flows.
+
+ # allow all traffic from the physical NIC, as it is trusted (i.e.,
+ # from a filtered vif, or from the physical infrastructure)
+ ovs_ofctl('add-flow', bridge_name,
+ "priority=2,in_port=%s,actions=normal" % pnic_ofport)
+
+ # default drop
+ ovs_ofctl('add-flow', bridge_name, 'priority=1,actions=drop')
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 4 or sys.argv[1] not in ('online', 'offline', 'reset'):
+ print sys.argv
+ script_name = os.path.basename(sys.argv[0])
+ print "This script configures base ovs flows."
+ print "usage: %s [online|offline|reset] phys-dev-name bridge-name" \
+ % script_name
+ print " ex: %s online eth0 xenbr0" % script_name
+ sys.exit(1)
+ else:
+ command, phys_dev_name, bridge_name = sys.argv[1:4]
+ main(command, phys_dev_name, bridge_name)
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py
new file mode 100755
index 000000000..accd08b91
--- /dev/null
+++ b/plugins/xenserver/networking/etc/xensource/scripts/ovs_configure_vif_flows.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+This script is used to configure openvswitch flows on XenServer hosts.
+"""
+
+import os
+import sys
+
+# This is written to Python 2.4, since that is what is available on XenServer
+import netaddr
+import simplejson as json
+
+from novalib import execute, execute_get_output
+
+
+OVS_OFCTL = '/usr/bin/ovs-ofctl'
+
+
+class OvsFlow(object):
+ def __init__(self, bridge, params):
+ self.bridge = bridge
+ self.params = params
+
+ def add(self, rule):
+ execute(OVS_OFCTL, 'add-flow', self.bridge, rule % self.params)
+
+ def clear_flows(self, ofport):
+ execute(OVS_OFCTL, 'del-flows', self.bridge, "in_port=%s" % ofport)
+
+
+def main(command, vif_raw, net_type):
+ if command not in ('online', 'offline'):
+ return
+
+ vif_name, dom_id, vif_index = vif_raw.split('-')
+ vif = "%s%s.%s" % (vif_name, dom_id, vif_index)
+ bridge = "xenbr%s" % vif_index
+
+ xsls = execute_get_output('/usr/bin/xenstore-ls',
+ '/local/domain/%s/vm-data/networking' % dom_id)
+ macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
+
+ for mac in macs:
+ xsread = execute_get_output('/usr/bin/xenstore-read',
+ '/local/domain/%s/vm-data/networking/%s' %
+ (dom_id, mac))
+ data = json.loads(xsread)
+ if data["label"] == "public":
+ this_vif = "vif%s.0" % dom_id
+ else:
+ this_vif = "vif%s.1" % dom_id
+
+ if vif == this_vif:
+ vif_ofport = execute_get_output('/usr/bin/ovs-vsctl', 'get',
+ 'Interface', vif, 'ofport')
+
+ params = dict(VIF_NAME=vif,
+ MAC=data['mac'],
+ OF_PORT=vif_ofport)
+
+ ovs = OvsFlow(bridge, params)
+
+ if command == 'offline':
+ # I haven't found a way to clear only IPv4 or IPv6 rules.
+ ovs.clear_flows(vif_ofport)
+
+ if command == 'online':
+ if net_type in ('ipv4', 'all') and 'ips' in data:
+ for ip4 in data['ips']:
+ ovs.params.update({'IPV4_ADDR': ip4['ip']})
+ apply_ovs_ipv4_flows(ovs, bridge, params)
+ if net_type in ('ipv6', 'all') and 'ip6s' in data:
+ for ip6 in data['ip6s']:
+ link_local = str(netaddr.EUI(data['mac']).eui64()\
+ .ipv6_link_local())
+ ovs.params.update({'IPV6_LINK_LOCAL_ADDR': link_local})
+ ovs.params.update({'IPV6_GLOBAL_ADDR': ip6['ip']})
+ apply_ovs_ipv6_flows(ovs, bridge, params)
+
+
+def apply_ovs_ipv4_flows(ovs, bridge, params):
+ # allow valid ARP outbound (both request / reply)
+ ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp,"
+ "arp_sha=%(MAC)s,nw_src=%(IPV4_ADDR)s,actions=normal")
+
+ ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,arp,"
+ "arp_sha=%(MAC)s,nw_src=0.0.0.0,actions=normal")
+
+ # allow valid IPv4 outbound
+ ovs.add("priority=3,in_port=%(OF_PORT)s,dl_src=%(MAC)s,ip,"
+ "nw_src=%(IPV4_ADDR)s,actions=normal")
+
+
+def apply_ovs_ipv6_flows(ovs, bridge, params):
+ # allow valid IPv6 ND outbound (are both global and local IPs needed?)
+ # Neighbor Solicitation
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
+ "actions=normal")
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=135,actions=normal")
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,nd_sll=%(MAC)s,"
+ "actions=normal")
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=135,actions=normal")
+
+ # Neighbor Advertisement
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,"
+ "nd_target=%(IPV6_LINK_LOCAL_ADDR)s,actions=normal")
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp_type=136,actions=normal")
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,"
+ "nd_target=%(IPV6_GLOBAL_ADDR)s,actions=normal")
+ ovs.add("priority=6,in_port=%(OF_PORT)s,dl_src=%(MAC)s,icmp6,"
+ "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp_type=136,actions=normal")
+
+ # drop all other neighbor discovery (required because we permit all icmp6 below)
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=135,actions=drop")
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=136,actions=drop")
+
+ # do not allow sending specific ICMPv6 types
+ # Router Advertisement
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=134,actions=drop")
+ # Redirect Gateway
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=137,actions=drop")
+ # Mobile Prefix Solicitation
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=146,actions=drop")
+ # Mobile Prefix Advertisement
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=147,actions=drop")
+ # Multicast Router Advertisement
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=151,actions=drop")
+ # Multicast Router Solicitation
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=152,actions=drop")
+ # Multicast Router Termination
+ ovs.add("priority=5,in_port=%(OF_PORT)s,icmp6,icmp_type=153,actions=drop")
+
+ # allow valid IPv6 outbound, by type
+ ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
+ "ipv6_src=%(IPV6_GLOBAL_ADDR)s,icmp6,actions=normal")
+ ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
+ "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,icmp6,actions=normal")
+ ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
+ "ipv6_src=%(IPV6_GLOBAL_ADDR)s,tcp6,actions=normal")
+ ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
+ "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,tcp6,actions=normal")
+ ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
+ "ipv6_src=%(IPV6_GLOBAL_ADDR)s,udp6,actions=normal")
+ ovs.add("priority=4,in_port=%(OF_PORT)s,dl_src=%(MAC)s,"
+ "ipv6_src=%(IPV6_LINK_LOCAL_ADDR)s,udp6,actions=normal")
+ # all else will be dropped ...
+
+
+if __name__ == "__main__":
+ if len(sys.argv) != 4:
+ print "usage: %s [online|offline] vif-domid-idx [ipv4|ipv6|all] " % \
+ os.path.basename(sys.argv[0])
+ sys.exit(1)
+ else:
+ command, vif_raw, net_type = sys.argv[1:4]
+ main(command, vif_raw, net_type)
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
index 48122e6d6..662def205 100755
--- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 OpenStack LLC.
+# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -29,15 +29,18 @@ import sys
import simplejson as json
+from novalib import execute, execute_get_output
+
+
def main(dom_id, command, only_this_vif=None):
- xsls = execute('/usr/bin/xenstore-ls',
- '/local/domain/%s/vm-data/networking' % dom_id, True)
+ xsls = execute_get_output('/usr/bin/xenstore-ls',
+ '/local/domain/%s/vm-data/networking' % dom_id)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
- xsread = execute('/usr/bin/enstore-read',
- '/local/domain/%s/vm-data/networking/%s' %
- (dom_id, mac), True)
+ xsread = execute_get_output('/usr/bin/xenstore-read',
+ '/local/domain/%s/vm-data/networking/%s' %
+ (dom_id, mac))
data = json.loads(xsread)
for ip in data['ips']:
if data["label"] == "public":
@@ -52,17 +55,6 @@ def main(dom_id, command, only_this_vif=None):
apply_iptables_rules(command, params)
-def execute(*command, return_stdout=False):
- devnull = open(os.devnull, 'w')
- command = map(str, command)
- proc = subprocess.Popen(command, close_fds=True,
- stdout=subprocess.PIPE, stderr=devnull)
- devnull.close()
- if return_stdout:
- return proc.stdout.read()
- else:
- return None
-
# A note about adding rules:
# Whenever we add any rule to iptables, arptables or ebtables we first
# delete the same rule to ensure the rule only exists once.
@@ -113,8 +105,8 @@ def apply_ebtables_rules(command, params):
ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
'--arp-ip-dst', params['IP'],
'-j', 'ACCEPT')
- ebtables('-D', 'FORWARD', '-p', '0800', '-o',
- params['VIF'], '--ip-dst', params['IP'],
+ ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'],
+ '--ip-dst', params['IP'],
'-j', 'ACCEPT')
if command == 'online':
ebtables('-A', 'FORWARD', '-p', '0806',
diff --git a/tools/install_venv.py b/tools/install_venv.py
index 812b1dd0f..f4b6583ed 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -36,7 +36,7 @@ PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
def die(message, *args):
- print >>sys.stderr, message % args
+ print >> sys.stderr, message % args
sys.exit(1)