| Field | Value | Date |
|---|---|---|
| author | Anthony Young <sleepsonthefloor@gmail.com> | 2011-06-03 14:43:12 -0700 |
| committer | Anthony Young <sleepsonthefloor@gmail.com> | 2011-06-03 14:43:12 -0700 |
| commit | f047047356e91c6227e1e400eea80d2f35c35285 (patch) | |
| tree | 7ce72be00ee8b8d48291df5ca7383805e831ddbb /nova | |
| parent | 9ada213e500cc3233c048d834791924947545a67 (diff) | |
| parent | 3101d000407aff5b2c1f8f531c08848a9c909865 (diff) | |
| download | nova-f047047356e91c6227e1e400eea80d2f35c35285.tar.gz nova-f047047356e91c6227e1e400eea80d2f35c35285.tar.xz nova-f047047356e91c6227e1e400eea80d2f35c35285.zip | |
merge with trunk
Diffstat (limited to 'nova')
112 files changed, 3298 insertions, 1426 deletions
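The bulk of this merge converts the OpenStack API controllers from `wsgi.Controller` subclasses that deserialized their own request bodies into plain classes wrapped by a `create_resource()` factory, which returns a `nova.api.openstack.wsgi.Resource` configured with explicit per-content-type serializers; handler methods now receive the already-deserialized `body` argument. The sketch below is illustrative only, modeled on the `accounts.py`, `flavors.py`, and `__init__.py` hunks that follow — `WidgetController`, the `widget` resource name, and its fields are hypothetical, and the imports assume you are inside the nova tree at this revision.

```python
# Illustrative sketch of the controller/Resource pattern introduced in this
# merge; WidgetController and "widget" are hypothetical names.
from nova.api.openstack import wsgi


class WidgetController(object):
    """A plain controller; it no longer subclasses nova.wsgi.Controller."""

    def index(self, req):
        # Handlers return plain dicts; the wrapping Resource serializes them.
        return {'widgets': []}

    def create(self, req, body):
        # The request body arrives already deserialized as `body`, replacing
        # the old self._deserialize(req.body, req.get_content_type()) calls.
        return {'widget': body['widget']}


def create_resource():
    # XML attribute metadata that used to live in _serialization_metadata on
    # the controller now configures the serializer directly.
    metadata = {
        'attributes': {
            'widget': ['id', 'name'],
        },
    }

    serializers = {
        'application/xml': wsgi.XMLDictSerializer(metadata=metadata,
                                                  xmlns=wsgi.XMLNS_V10),
    }

    return wsgi.Resource(WidgetController(), serializers=serializers)
```

Routes then register the factory's return value rather than a controller instance, e.g. `mapper.resource("widget", "widgets", controller=create_resource())`, matching the `nova/api/openstack/__init__.py` changes in this diff.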
diff --git a/nova/api/direct.py b/nova/api/direct.py index 8ceae299c..ea20042a7 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags from nova import utils from nova import wsgi +import nova.api.openstack.wsgi # Global storage for registering modules. @@ -251,7 +252,7 @@ class Reflection(object): return self._methods[method] -class ServiceWrapper(wsgi.Controller): +class ServiceWrapper(object): """Wrapper to dynamically povide a WSGI controller for arbitrary objects. With lightweight introspection allows public methods on the object to @@ -265,7 +266,7 @@ class ServiceWrapper(wsgi.Controller): def __init__(self, service_handle): self.service_handle = service_handle - @webob.dec.wsgify(RequestClass=wsgi.Request) + @webob.dec.wsgify(RequestClass=nova.api.openstack.wsgi.Request) def __call__(self, req): arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] @@ -289,8 +290,11 @@ class ServiceWrapper(wsgi.Controller): try: content_type = req.best_match_content_type() - default_xmlns = self.get_default_xmlns(req) - return self._serialize(result, content_type, default_xmlns) + serializer = { + 'application/xml': nova.api.openstack.wsgi.XMLDictSerializer(), + 'application/json': nova.api.openstack.wsgi.JSONDictSerializer(), + }[content_type] + return serializer.serialize(result) except: raise exception.Error("returned non-serializable type: %s" % result) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index ea94d9c1f..aeebd86fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. """ import base64 -import datetime from nova import db from nova import exception @@ -305,7 +304,7 @@ class AdminController(object): * Volume Count """ services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] rv = [] for host in [service['host'] for service in services]: diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index d92838f38..b7a9a8633 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -23,7 +23,6 @@ datastore. 
""" import base64 -import datetime import IPy import os import urllib @@ -159,7 +158,7 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - image_ec2_id = self.image_ec2_id(instance_ref['image_id']) + image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -235,7 +234,7 @@ class CloudController(object): 'zoneState': 'available'}]} services = db.service_get_all(context, False) - now = datetime.datetime.utcnow() + now = utils.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: @@ -595,7 +594,7 @@ class CloudController(object): instance_id = ec2utils.ec2_id_to_id(ec2_id) output = self.compute_api.get_console_output( context, instance_id=instance_id) - now = datetime.datetime.utcnow() + now = utils.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} @@ -665,11 +664,20 @@ class CloudController(object): v['display_description'] = volume['display_description'] return v - def create_volume(self, context, size, **kwargs): - LOG.audit(_("Create volume of %s GB"), size, context=context) + def create_volume(self, context, **kwargs): + size = kwargs.get('size') + if kwargs.get('snapshot_id') != None: + snapshot_id = ec2utils.ec2_id_to_id(kwargs['snapshot_id']) + LOG.audit(_("Create volume from snapshot %s"), snapshot_id, + context=context) + else: + snapshot_id = None + LOG.audit(_("Create volume of %s GB"), size, context=context) + volume = self.volume_api.create( context, size=size, + snapshot_id=snapshot_id, name=kwargs.get('display_name'), description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of @@ -765,13 +773,13 @@ class CloudController(object): instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.is_admin: - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id - i['imageId'] = self.image_ec2_id(instance['image_id']) + i['imageId'] = self.image_ec2_id(instance['image_ref']) i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} @@ -890,7 +898,7 @@ class CloudController(object): instances = self.compute_api.create(context, instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), - image_id=self._get_image(context, kwargs['image_id'])['id'], + image_href=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id'), @@ -966,7 +974,12 @@ class CloudController(object): def image_ec2_id(image_id, image_type='ami'): """Returns image ec2_id using id and three letter type.""" template = image_type + '-%08x' - return ec2utils.id_to_ec2_id(int(image_id), template=template) + try: + return ec2utils.id_to_ec2_id(int(image_id), template=template) + except ValueError: + #TODO(wwolf): once we have ec2_id -> glance_id mapping + # in place, this wont be necessary + return "ami-00000000" def _get_image(self, context, ec2_id): try: diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index 28f99b0ef..b70266a20 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ 
b/nova/api/ec2/metadatarequesthandler.py @@ -23,6 +23,7 @@ import webob.exc from nova import log as logging from nova import flags +from nova import utils from nova import wsgi from nova.api.ec2 import cloud @@ -71,7 +72,15 @@ class MetadataRequestHandler(wsgi.Application): remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) - meta_data = cc.get_metadata(remote_address) + try: + meta_data = cc.get_metadata(remote_address) + except Exception: + LOG.exception(_('Failed to get metadata for ip: %s'), + remote_address) + msg = _('An unknown error has occurred. ' + 'Please try your request again.') + exc = webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + return exc if meta_data is None: LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 5b7f080ad..d8fb5265b 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -26,7 +26,7 @@ import webob.exc from nova import flags from nova import log as logging -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import accounts from nova.api.openstack import faults from nova.api.openstack import backup_schedules @@ -40,6 +40,7 @@ from nova.api.openstack import servers from nova.api.openstack import server_metadata from nova.api.openstack import shared_ip_groups from nova.api.openstack import users +from nova.api.openstack import wsgi from nova.api.openstack import zones @@ -50,7 +51,7 @@ flags.DEFINE_bool('allow_admin_api', 'When True, this API service will accept admin operations.') -class FaultWrapper(wsgi.Middleware): +class FaultWrapper(base_wsgi.Middleware): """Calls down the middleware stack, making exceptions into faults.""" @webob.dec.wsgify(RequestClass=wsgi.Request) @@ -63,7 +64,7 @@ class FaultWrapper(wsgi.Middleware): return faults.Fault(exc) -class APIRouter(wsgi.Router): +class APIRouter(base_wsgi.Router): """ Routes requests on the OpenStack API to the appropriate controller and method. 
@@ -97,19 +98,21 @@ class APIRouter(wsgi.Router): server_members['reset_network'] = 'POST' server_members['inject_network_info'] = 'POST' - mapper.resource("zone", "zones", controller=zones.Controller(), + mapper.resource("zone", "zones", + controller=zones.create_resource(), collection={'detail': 'GET', 'info': 'GET', 'select': 'GET'}) - mapper.resource("user", "users", controller=users.Controller(), + mapper.resource("user", "users", + controller=users.create_resource(), collection={'detail': 'GET'}) mapper.resource("account", "accounts", - controller=accounts.Controller(), + controller=accounts.create_resource(), collection={'detail': 'GET'}) mapper.resource("console", "consoles", - controller=consoles.Controller(), + controller=consoles.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) @@ -122,31 +125,31 @@ class APIRouterV10(APIRouter): def _setup_routes(self, mapper): super(APIRouterV10, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.ControllerV10(), + controller=servers.create_resource('1.0'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.ControllerV10(), + controller=images.create_resource('1.0'), collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", - controller=flavors.ControllerV10(), + controller=flavors.create_resource('1.0'), collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, - controller=shared_ip_groups.Controller()) + controller=shared_ip_groups.create_resource()) mapper.resource("backup_schedule", "backup_schedule", - controller=backup_schedules.Controller(), + controller=backup_schedules.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("limit", "limits", - controller=limits.LimitsControllerV10()) + controller=limits.create_resource('1.0')) - mapper.resource("ip", "ips", controller=ips.Controller(), + mapper.resource("ip", "ips", controller=ips.create_resource(), collection=dict(public='GET', private='GET'), parent_resource=dict(member_name='server', collection_name='servers')) @@ -158,27 +161,27 @@ class APIRouterV11(APIRouter): def _setup_routes(self, mapper): super(APIRouterV11, self)._setup_routes(mapper) mapper.resource("server", "servers", - controller=servers.ControllerV11(), + controller=servers.create_resource('1.1'), collection={'detail': 'GET'}, member=self.server_members) mapper.resource("image", "images", - controller=images.ControllerV11(), + controller=images.create_resource('1.1'), collection={'detail': 'GET'}) mapper.resource("image_meta", "meta", - controller=image_metadata.Controller(), + controller=image_metadata.create_resource(), parent_resource=dict(member_name='image', collection_name='images')) mapper.resource("server_meta", "meta", - controller=server_metadata.Controller(), + controller=server_metadata.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("flavor", "flavors", - controller=flavors.ControllerV11(), + controller=flavors.create_resource('1.1'), collection={'detail': 'GET'}) mapper.resource("limit", "limits", - controller=limits.LimitsControllerV11()) + controller=limits.create_resource('1.1')) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 00fdd4540..0dcd37217 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -20,8 +20,9 @@ from nova 
import flags from nova import log as logging from nova.auth import manager -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.openstack') @@ -34,12 +35,7 @@ def _translate_keys(account): manager=account.project_manager_id) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "account": ["id", "name", "description", "manager"]}}} +class Controller(object): def __init__(self): self.manager = manager.AuthManager() @@ -66,20 +62,33 @@ class Controller(common.OpenstackController): self.manager.delete_project(id) return {} - def create(self, req): + def create(self, req, body): """We use update with create-or-update semantics because the id comes from an external source""" raise faults.Fault(webob.exc.HTTPNotImplemented()) - def update(self, req, id): + def update(self, req, id, body): """This is really create or update.""" self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - description = env['account'].get('description') - manager = env['account'].get('manager') + description = body['account'].get('description') + manager = body['account'].get('manager') try: account = self.manager.get_project(id) self.manager.modify_project(id, manager, description) except exception.NotFound: account = self.manager.create_project(id, manager, description) return dict(account=_translate_keys(account)) + + +def create_resource(): + metadata = { + "attributes": { + "account": ["id", "name", "description", "manager"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6c6ee22a2..b49bf449b 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -13,9 +13,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. 
-import datetime import hashlib import time @@ -127,7 +126,7 @@ class AuthMiddleware(wsgi.Middleware): except exception.NotFound: return None if token: - delta = datetime.datetime.utcnow() - token['created_at'] + delta = utils.utcnow() - token['created_at'] if delta.days >= 2: self.db.auth_token_destroy(ctxt, token['token_hash']) else: diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 4bf744046..71a14d4ce 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -19,9 +19,8 @@ import time from webob import exc -from nova.api.openstack import common from nova.api.openstack import faults -import nova.image.service +from nova.api.openstack import wsgi def _translate_keys(inst): @@ -29,14 +28,9 @@ def _translate_keys(inst): return dict(backupSchedule=inst) -class Controller(common.OpenstackController): +class Controller(object): """ The backup schedule API controller for the Openstack API """ - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'backupSchedule': []}}} - def __init__(self): pass @@ -48,7 +42,7 @@ class Controller(common.OpenstackController): """ Returns a single backup schedule for a given instance """ return faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, server_id): + def create(self, req, server_id, body): """ No actual update method required, since the existing API allows both create and update through a POST """ return faults.Fault(exc.HTTPNotImplemented()) @@ -56,3 +50,18 @@ class Controller(common.OpenstackController): def delete(self, req, server_id, id): """ Deletes an existing backup schedule """ return faults.Fault(exc.HTTPNotImplemented()) + + +def create_resource(): + metadata = { + 'attributes': { + 'backupSchedule': [], + }, + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 32cd689ca..571e46766 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -23,7 +23,6 @@ import webob from nova import exception from nova import flags from nova import log as logging -from nova import wsgi LOG = logging.getLogger('nova.api.openstack.common') @@ -100,34 +99,6 @@ def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): return items[start_index:range_end] -def get_image_id_from_image_hash(image_service, context, image_hash): - """Given an Image ID Hash, return an objectstore Image ID. - - image_service - reference to objectstore compatible image service. - context - security context for image service requests. - image_hash - hash of the image ID. - """ - - # FIX(sandy): This is terribly inefficient. It pulls all images - # from objectstore in order to find the match. ObjectStore - # should have a numeric counterpart to the string ID. - try: - items = image_service.detail(context) - except NotImplementedError: - items = image_service.index(context) - for image in items: - image_id = image['id'] - try: - if abs(hash(image_id)) == int(image_hash): - return image_id - except ValueError: - msg = _("Requested image_id has wrong format: %s," - "should have numerical format") % image_id - LOG.error(msg) - raise Exception(msg) - raise exception.ImageNotFound(image_id=image_hash) - - def get_id_from_href(href): """Return the id portion of a url as an int. 
@@ -146,9 +117,3 @@ def get_id_from_href(href): except: LOG.debug(_("Error extracting id from href: %s") % href) raise webob.exc.HTTPBadRequest(_('could not parse id from href')) - - -class OpenstackController(wsgi.Controller): - def get_default_xmlns(self, req): - # Use V10 by default - return XML_NS_V10 diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 1a77f25d7..bccf04d8f 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -19,8 +19,8 @@ from webob import exc from nova import console from nova import exception -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi def _translate_keys(cons): @@ -43,17 +43,11 @@ def _translate_detail_keys(cons): return dict(console=info) -class Controller(common.OpenstackController): - """The Consoles Controller for the Openstack API""" - - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'console': []}}} +class Controller(object): + """The Consoles controller for the Openstack API""" def __init__(self): self.console_api = console.API() - super(Controller, self).__init__() def index(self, req, server_id): """Returns a list of consoles for this instance""" @@ -63,9 +57,8 @@ class Controller(common.OpenstackController): return dict(consoles=[_translate_keys(console) for console in consoles]) - def create(self, req, server_id): + def create(self, req, server_id, body): """Creates a new console""" - #info = self._deserialize(req.body, req.get_content_type()) self.console_api.create_console( req.environ['nova.context'], int(server_id)) @@ -94,3 +87,17 @@ class Controller(common.OpenstackController): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() + + +def create_resource(): + metadata = { + 'attributes': { + 'console': [], + }, + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py index b42a1d89d..acb5eb280 100644 --- a/nova/api/openstack/contrib/__init__.py +++ b/nova/api/openstack/contrib/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Contrib contains extensions that are shipped with nova. 
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 18de2ec71..feabdce89 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -22,7 +22,6 @@ from nova import exception from nova import flags from nova import log as logging from nova import volume -from nova import wsgi from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults @@ -64,7 +63,7 @@ def _translate_volume_summary_view(context, vol): return d -class VolumeController(wsgi.Controller): +class VolumeController(object): """The Volumes API controller for the OpenStack API.""" _serialization_metadata = { @@ -124,18 +123,17 @@ class VolumeController(wsgi.Controller): res = [entity_maker(context, vol) for vol in limited_list] return {'volumes': res} - def create(self, req): + def create(self, req, body): """Creates a new volume.""" context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) - vol = env['volume'] + vol = body['volume'] size = vol['size'] LOG.audit(_("Create volume of %s GB"), size, context=context) - new_volume = self.volume_api.create(context, size, + new_volume = self.volume_api.create(context, size, None, vol.get('display_name'), vol.get('display_description')) @@ -175,7 +173,7 @@ def _translate_attachment_summary_view(_context, vol): return d -class VolumeAttachmentController(wsgi.Controller): +class VolumeAttachmentController(object): """The volume attachment API controller for the Openstack API. A child resource of the server. Note that we use the volume id @@ -219,17 +217,16 @@ class VolumeAttachmentController(wsgi.Controller): return {'volumeAttachment': _translate_attachment_detail_view(context, vol)} - def create(self, req, server_id): + def create(self, req, server_id, body): """Attach a volume to an instance.""" context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) instance_id = server_id - volume_id = env['volumeAttachment']['volumeId'] - device = env['volumeAttachment']['device'] + volume_id = body['volumeAttachment']['volumeId'] + device = body['volumeAttachment']['device'] msg = _("Attach volume %(volume_id)s to instance %(server_id)s" " at %(device)s") % locals() @@ -259,7 +256,7 @@ class VolumeAttachmentController(wsgi.Controller): # TODO(justinsb): How do I return "accepted" here? return {'volumeAttachment': attachment} - def update(self, _req, _server_id, _id): + def update(self, req, server_id, id, body): """Update a volume attachment. 
We don't currently support this.""" return faults.Fault(exc.HTTPBadRequest()) diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 8e77b25fb..881b61733 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -27,9 +27,10 @@ import webob.exc from nova import exception from nova import flags from nova import log as logging -from nova import wsgi +from nova import wsgi as base_wsgi from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi LOG = logging.getLogger('extensions') @@ -115,28 +116,34 @@ class ExtensionDescriptor(object): return request_exts -class ActionExtensionController(common.OpenstackController): - +class ActionExtensionController(object): def __init__(self, application): - self.application = application self.action_handlers = {} def add_action(self, action_name, handler): self.action_handlers[action_name] = handler - def action(self, req, id): - - input_dict = self._deserialize(req.body, req.get_content_type()) + def action(self, req, id, body): for action_name, handler in self.action_handlers.iteritems(): - if action_name in input_dict: - return handler(input_dict, req, id) + if action_name in body: + return handler(body, req, id) # no action handler found (bump to downstream application) res = self.application return res -class RequestExtensionController(common.OpenstackController): +class ActionExtensionResource(wsgi.Resource): + + def __init__(self, application): + controller = ActionExtensionController(application) + super(ActionExtensionResource, self).__init__(controller) + + def add_action(self, action_name, handler): + self.controller.add_action(action_name, handler) + + +class RequestExtensionController(object): def __init__(self, application): self.application = application @@ -153,7 +160,17 @@ class RequestExtensionController(common.OpenstackController): return res -class ExtensionController(common.OpenstackController): +class RequestExtensionResource(wsgi.Resource): + + def __init__(self, application): + controller = RequestExtensionController(application) + super(RequestExtensionResource, self).__init__(controller) + + def add_handler(self, handler): + self.controller.add_handler(handler) + + +class ExtensionsResource(wsgi.Resource): def __init__(self, extension_manager): self.extension_manager = extension_manager @@ -186,7 +203,7 @@ class ExtensionController(common.OpenstackController): raise faults.Fault(webob.exc.HTTPNotFound()) -class ExtensionMiddleware(wsgi.Middleware): +class ExtensionMiddleware(base_wsgi.Middleware): """Extensions middleware for WSGI.""" @classmethod def factory(cls, global_config, **local_config): @@ -195,43 +212,43 @@ class ExtensionMiddleware(wsgi.Middleware): return cls(app, **local_config) return _factory - def _action_ext_controllers(self, application, ext_mgr, mapper): - """Return a dict of ActionExtensionController-s by collection.""" - action_controllers = {} + def _action_ext_resources(self, application, ext_mgr, mapper): + """Return a dict of ActionExtensionResource-s by collection.""" + action_resources = {} for action in ext_mgr.get_actions(): - if not action.collection in action_controllers.keys(): - controller = ActionExtensionController(application) + if not action.collection in action_resources.keys(): + resource = ActionExtensionResource(application) mapper.connect("/%s/:(id)/action.:(format)" % action.collection, action='action', - controller=controller, + controller=resource, 
conditions=dict(method=['POST'])) mapper.connect("/%s/:(id)/action" % action.collection, action='action', - controller=controller, + controller=resource, conditions=dict(method=['POST'])) - action_controllers[action.collection] = controller + action_resources[action.collection] = resource - return action_controllers + return action_resources - def _request_ext_controllers(self, application, ext_mgr, mapper): - """Returns a dict of RequestExtensionController-s by collection.""" - request_ext_controllers = {} + def _request_ext_resources(self, application, ext_mgr, mapper): + """Returns a dict of RequestExtensionResource-s by collection.""" + request_ext_resources = {} for req_ext in ext_mgr.get_request_extensions(): - if not req_ext.key in request_ext_controllers.keys(): - controller = RequestExtensionController(application) + if not req_ext.key in request_ext_resources.keys(): + resource = RequestExtensionResource(application) mapper.connect(req_ext.url_route + '.:(format)', action='process', - controller=controller, + controller=resource, conditions=req_ext.conditions) mapper.connect(req_ext.url_route, action='process', - controller=controller, + controller=resource, conditions=req_ext.conditions) - request_ext_controllers[req_ext.key] = controller + request_ext_resources[req_ext.key] = resource - return request_ext_controllers + return request_ext_resources def __init__(self, application, ext_mgr=None): @@ -246,22 +263,22 @@ class ExtensionMiddleware(wsgi.Middleware): LOG.debug(_('Extended resource: %s'), resource.collection) mapper.resource(resource.collection, resource.collection, - controller=resource.controller, + controller=wsgi.Resource(resource.controller), collection=resource.collection_actions, member=resource.member_actions, parent_resource=resource.parent) # extended actions - action_controllers = self._action_ext_controllers(application, ext_mgr, + action_resources = self._action_ext_resources(application, ext_mgr, mapper) for action in ext_mgr.get_actions(): LOG.debug(_('Extended action: %s'), action.action_name) - controller = action_controllers[action.collection] - controller.add_action(action.action_name, action.handler) + resource = action_resources[action.collection] + resource.add_action(action.action_name, action.handler) # extended requests - req_controllers = self._request_ext_controllers(application, ext_mgr, - mapper) + req_controllers = self._request_ext_resources(application, ext_mgr, + mapper) for request_ext in ext_mgr.get_request_extensions(): LOG.debug(_('Extended request: %s'), request_ext.key) controller = req_controllers[request_ext.key] @@ -313,7 +330,7 @@ class ExtensionManager(object): """Returns a list of ResourceExtension objects.""" resources = [] resources.append(ResourceExtension('extensions', - ExtensionController(self))) + ExtensionsResource(self))) for alias, ext in self.extensions.iteritems(): try: resources.extend(ext.get_resources()) @@ -410,7 +427,7 @@ class ExtensionManager(object): class RequestExtension(object): - """Extend requests and responses of core nova OpenStack API controllers. + """Extend requests and responses of core nova OpenStack API resources. Provide a way to add data to responses and handle custom request data that is sent to core nova OpenStack API controllers. 
@@ -424,7 +441,7 @@ class RequestExtension(object): class ActionExtension(object): - """Add custom actions to core nova OpenStack API controllers.""" + """Add custom actions to core nova OpenStack API resources.""" def __init__(self, collection, action_name, handler): self.collection = collection diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index 87118ce19..b9a23c126 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -19,8 +19,7 @@ import webob.dec import webob.exc -from nova import wsgi -from nova.api.openstack import common +from nova.api.openstack import wsgi class Fault(webob.exc.HTTPException): @@ -55,13 +54,21 @@ class Fault(webob.exc.HTTPException): if code == 413: retry = self.wrapped_exc.headers['Retry-After'] fault_data[fault_name]['retryAfter'] = retry + # 'code' is an attribute on the fault tag itself - metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} - default_xmlns = common.XML_NS_V10 - serializer = wsgi.Serializer(metadata, default_xmlns) + metadata = {'attributes': {fault_name: 'code'}} + content_type = req.best_match_content_type() - self.wrapped_exc.body = serializer.serialize(fault_data, content_type) + + serializer = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONDictSerializer(), + }[content_type] + + self.wrapped_exc.body = serializer.serialize(fault_data) self.wrapped_exc.content_type = content_type + return self.wrapped_exc @@ -70,14 +77,6 @@ class OverLimitFault(webob.exc.HTTPException): Rate-limited request response. """ - _serialization_metadata = { - "application/xml": { - "attributes": { - "overLimitFault": "code", - }, - }, - } - def __init__(self, message, details, retry_time): """ Initialize new `OverLimitFault` with relevant information. @@ -97,8 +96,16 @@ class OverLimitFault(webob.exc.HTTPException): Return the wrapped exception with a serialized body conforming to our error format. 
""" - serializer = wsgi.Serializer(self._serialization_metadata) content_type = request.best_match_content_type() - content = serializer.serialize(self.content, content_type) + metadata = {"attributes": {"overLimitFault": "code"}} + + serializer = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + 'application/json': wsgi.JSONDictSerializer(), + }[content_type] + + content = serializer.serialize(self.content) self.wrapped_exc.body = content + return self.wrapped_exc diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 4c5971cf6..a21ff6cb2 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -19,22 +19,13 @@ import webob from nova import db from nova import exception -from nova.api.openstack import common from nova.api.openstack import views +from nova.api.openstack import wsgi -class Controller(common.OpenstackController): +class Controller(object): """Flavor controller for the OpenStack API.""" - _serialization_metadata = { - 'application/xml': { - "attributes": { - "flavor": ["id", "name", "ram", "disk"], - "link": ["rel", "type", "href"], - } - } - } - def index(self, req): """Return all flavors in brief.""" items = self._get_flavors(req, is_detail=False) @@ -71,14 +62,31 @@ class Controller(common.OpenstackController): class ControllerV10(Controller): + def _get_view_builder(self, req): return views.flavors.ViewBuilder() class ControllerV11(Controller): + def _get_view_builder(self, req): base_url = req.application_url return views.flavors.ViewBuilderV11(base_url) - def get_default_xmlns(self, req): - return common.XML_NS_V11 + +def create_resource(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns), + } + + return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index 1eccc0174..ebfe2bde9 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -18,22 +18,21 @@ from webob import exc from nova import flags +from nova import image from nova import quota from nova import utils -from nova import wsgi -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi FLAGS = flags.FLAGS -class Controller(common.OpenstackController): +class Controller(object): """The image metadata API controller for the Openstack API""" def __init__(self): - self.image_service = utils.import_object(FLAGS.image_service) - super(Controller, self).__init__() + self.image_service = image.get_default_image_service() def _get_metadata(self, context, image_id, image=None): if not image: @@ -64,9 +63,8 @@ class Controller(common.OpenstackController): else: return faults.Fault(exc.HTTPNotFound()) - def create(self, req, image_id): + def create(self, req, image_id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id, img) if 'metadata' in body: @@ -77,9 +75,8 @@ class Controller(common.OpenstackController): self.image_service.update(context, image_id, img, None) return dict(metadata=metadata) - def update(self, req, image_id, id): + def update(self, req, image_id, id, body): context = 
req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) if not id in body: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) @@ -104,3 +101,11 @@ class Controller(common.OpenstackController): metadata.pop(id) img['properties'] = metadata self.image_service.update(context, image_id, img, None) + + +def create_resource(): + serializers = { + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 34d4c27fc..59d9e3082 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -18,40 +18,34 @@ import webob.exc from nova import compute from nova import exception from nova import flags +import nova.image from nova import log from nova import utils from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import images as images_view +from nova.api.openstack import wsgi LOG = log.getLogger('nova.api.openstack.images') FLAGS = flags.FLAGS +SUPPORTED_FILTERS = ['name', 'status'] -class Controller(common.OpenstackController): - """Base `wsgi.Controller` for retrieving/displaying images.""" - _serialization_metadata = { - 'application/xml': { - "attributes": { - "image": ["id", "name", "updated", "created", "status", - "serverId", "progress"], - "link": ["rel", "type", "href"], - }, - }, - } +class Controller(object): + """Base controller for retrieving/displaying images.""" def __init__(self, image_service=None, compute_service=None): """Initialize new `ImageController`. :param compute_service: `nova.compute.api:API` :param image_service: `nova.image.service:BaseImageService` - """ - _default_service = utils.import_object(flags.FLAGS.image_service) + """ self._compute_service = compute_service or compute.API() - self._image_service = image_service or _default_service + self._image_service = image_service or \ + nova.image.get_default_image_service() def index(self, req): """Return an index listing of images available to the request. @@ -59,7 +53,8 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object """ context = req.environ['nova.context'] - images = self._image_service.index(context) + filters = self._get_filters(req) + images = self._image_service.index(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -70,31 +65,40 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object. """ context = req.environ['nova.context'] - images = self._image_service.detail(context) + filters = self._get_filters(req) + images = self._image_service.detail(context, filters) images = common.limited(images, req) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) + def _get_filters(self, req): + """ + Return a dictionary of query param filters from the request + + :param req: the Request object coming from the wsgi layer + :retval a dict of key/value filters + """ + filters = {} + for param in req.str_params: + if param in SUPPORTED_FILTERS or param.startswith('property-'): + filters[param] = req.str_params.get(param) + + return filters + def show(self, req, id): """Return detailed information about a specific image. 
:param req: `wsgi.Request` object - :param id: Image identifier (integer) + :param id: Image identifier """ context = req.environ['nova.context'] try: - image_id = int(id) - except ValueError: + image = self._image_service.show(context, id) + except (exception.NotFound, exception.InvalidImageRef): explanation = _("Image not found.") raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) - try: - image = self._image_service.show(context, image_id) - except exception.NotFound: - explanation = _("Image '%d' not found.") % (image_id) - raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation)) - return dict(image=self.get_builder(req).build(image, detail=True)) def delete(self, req, id): @@ -103,26 +107,24 @@ class Controller(common.OpenstackController): :param req: `wsgi.Request` object :param id: Image identifier (integer) """ - image_id = id context = req.environ['nova.context'] - self._image_service.delete(context, image_id) + self._image_service.delete(context, id) return webob.exc.HTTPNoContent() - def create(self, req): + def create(self, req, body): """Snapshot a server instance and save the image. :param req: `wsgi.Request` object """ context = req.environ['nova.context'] content_type = req.get_content_type() - image = self._deserialize(req.body, content_type) - if not image: + if not body: raise webob.exc.HTTPBadRequest() try: - server_id = image["image"]["serverId"] - image_name = image["image"]["name"] + server_id = body["image"]["serverId"] + image_name = body["image"]["name"] except KeyError: raise webob.exc.HTTPBadRequest() @@ -151,5 +153,29 @@ class ControllerV11(Controller): base_url = request.application_url return images_view.ViewBuilderV11(base_url) - def get_default_xmlns(self, req): - return common.XML_NS_V11 + +def create_resource(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + metadata = { + "attributes": { + "image": ["id", "name", "updated", "created", "status", + "serverId", "progress"], + "link": ["rel", "type", "href"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, + metadata=metadata), + } + + return wsgi.Resource(controller, serializers=serializers) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index 778e9ba1a..abea71830 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -20,23 +20,14 @@ import time from webob import exc import nova -import nova.api.openstack.views.addresses -from nova.api.openstack import common from nova.api.openstack import faults +import nova.api.openstack.views.addresses +from nova.api.openstack import wsgi -class Controller(common.OpenstackController): +class Controller(object): """The servers addresses API controller for the Openstack API.""" - _serialization_metadata = { - 'application/xml': { - 'list_collections': { - 'public': {'item_name': 'ip', 'item_key': 'addr'}, - 'private': {'item_name': 'ip', 'item_key': 'addr'}, - }, - }, - } - def __init__(self): self.compute_api = nova.compute.API() self.builder = nova.api.openstack.views.addresses.ViewBuilderV10() @@ -65,8 +56,24 @@ class Controller(common.OpenstackController): def show(self, req, server_id, id): return faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, server_id): + def create(self, req, server_id, body): return faults.Fault(exc.HTTPNotImplemented()) def delete(self, req, server_id, id): return faults.Fault(exc.HTTPNotImplemented()) 
+ + +def create_resource(): + metadata = { + 'list_collections': { + 'public': {'item_name': 'ip', 'item_key': 'addr'}, + 'private': {'item_name': 'ip', 'item_key': 'addr'}, + }, + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=wsgi.XMLNS_V10), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index bd0250a7f..dc2bc6bbc 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -11,7 +11,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """ Module dedicated functions/classes dealing with rate limiting requests. @@ -31,10 +31,12 @@ from collections import defaultdict from webob.dec import wsgify from nova import quota +from nova import wsgi as base_wsgi from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults from nova.api.openstack.views import limits as limits_views +from nova.api.openstack import wsgi # Convenience constants for the limits dictionary passed to Limiter(). @@ -44,23 +46,11 @@ PER_HOUR = 60 * 60 PER_DAY = 60 * 60 * 24 -class LimitsController(common.OpenstackController): +class LimitsController(object): """ Controller for accessing limits in the OpenStack API. """ - _serialization_metadata = { - "application/xml": { - "attributes": { - "limit": ["verb", "URI", "uri", "regex", "value", "unit", - "resetTime", "next-available", "remaining", "name"], - }, - "plurals": { - "rate": "limit", - }, - }, - } - def index(self, req): """ Return all global and rate limit information. @@ -86,6 +76,35 @@ class LimitsControllerV11(LimitsController): return limits_views.ViewBuilderV11() +def create_resource(version='1.0'): + controller = { + '1.0': LimitsControllerV10, + '1.1': LimitsControllerV11, + }[version]() + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + metadata = { + "attributes": { + "limit": ["verb", "URI", "uri", "regex", "value", "unit", + "resetTime", "next-available", "remaining", "name"], + }, + "plurals": { + "rate": "limit", + }, + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, + metadata=metadata) + } + + return wsgi.Resource(controller, serializers=serializers) + + class Limit(object): """ Stores information about a limit for HTTP requets. @@ -197,7 +216,7 @@ DEFAULT_LIMITS = [ ] -class RateLimitingMiddleware(wsgi.Middleware): +class RateLimitingMiddleware(base_wsgi.Middleware): """ Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. 
@@ -211,7 +230,7 @@ class RateLimitingMiddleware(wsgi.Middleware): @param application: WSGI application to wrap @param limits: List of dictionaries describing limits """ - wsgi.Middleware.__init__(self, application) + base_wsgi.Middleware.__init__(self, application) self._limiter = Limiter(limits or DEFAULT_LIMITS) @wsgify(RequestClass=wsgi.Request) diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 88ffc3246..9ede548c2 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -13,7 +13,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. """Rate limiting of arbitrary actions.""" diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index fd64ee4fb..b38b84a2a 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -19,12 +19,11 @@ from webob import exc from nova import compute from nova import quota -from nova import wsgi -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi -class Controller(common.OpenstackController): +class Controller(object): """ The server metadata API controller for the Openstack API """ def __init__(self): @@ -43,10 +42,9 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] return self._get_metadata(context, server_id) - def create(self, req, server_id): + def create(self, req, server_id, body): context = req.environ['nova.context'] - data = self._deserialize(req.body, req.get_content_type()) - metadata = data.get('metadata') + metadata = body.get('metadata') try: self.compute_api.update_or_create_instance_metadata(context, server_id, @@ -55,9 +53,8 @@ class Controller(common.OpenstackController): self._handle_quota_error(error) return req.body - def update(self, req, server_id, id): + def update(self, req, server_id, id, body): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) if not id in body: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) @@ -92,3 +89,11 @@ class Controller(common.OpenstackController): if error.code == "MetadataLimitExceeded": raise exc.HTTPBadRequest(explanation=error.message) raise error + + +def create_resource(): + serializers = { + 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 5c10fc916..82d8be4aa 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -22,6 +22,7 @@ from xml.dom import minidom from nova import compute from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import quota from nova import utils @@ -31,6 +32,7 @@ import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images import nova.api.openstack.views.servers +from nova.api.openstack import wsgi from nova.auth import manager as auth_manager from nova.compute import instance_types import nova.api.openstack @@ -41,41 +43,30 @@ LOG = 
logging.getLogger('nova.api.openstack.servers') FLAGS = flags.FLAGS -class Controller(common.OpenstackController): +class Controller(object): """ The Server API controller for the OpenStack API """ - _serialization_metadata = { - "application/xml": { - "attributes": { - "server": ["id", "imageId", "name", "flavorId", "hostId", - "status", "progress", "adminPass", "flavorRef", - "imageRef"], - "link": ["rel", "type", "href"], - }, - "dict_collections": { - "metadata": {"item_name": "meta", "item_key": "key"}, - }, - "list_collections": { - "public": {"item_name": "ip", "item_key": "addr"}, - "private": {"item_name": "ip", "item_key": "addr"}, - }, - }, - } - def __init__(self): self.compute_api = compute.API() self._image_service = utils.import_object(FLAGS.image_service) - super(Controller, self).__init__() def index(self, req): """ Returns a list of server names and ids for a given user """ - return self._items(req, is_detail=False) + try: + servers = self._items(req, is_detail=False) + except exception.Invalid as err: + return exc.HTTPBadRequest(str(err)) + return servers def detail(self, req): """ Returns a list of server details for a given user """ - return self._items(req, is_detail=True) + try: + servers = self._items(req, is_detail=True) + except exception.Invalid as err: + return exc.HTTPBadRequest(str(err)) + return servers - def _image_id_from_req_data(self, data): + def _image_ref_from_req_data(self, data): raise NotImplementedError() def _flavor_id_from_req_data(self, data): @@ -122,15 +113,14 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def create(self, req): + def create(self, req, body): """ Creates a new server for a given user """ - env = self._deserialize_create(req) - if not env: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) context = req.environ['nova.context'] - password = self._get_server_admin_password(env['server']) + password = self._get_server_admin_password(body['server']) key_name = None key_data = None @@ -140,29 +130,30 @@ class Controller(common.OpenstackController): key_name = key_pair['name'] key_data = key_pair['public_key'] - requested_image_id = self._image_id_from_req_data(env) + image_href = self._image_ref_from_req_data(body) try: - image_id = common.get_image_id_from_image_hash(self._image_service, - context, requested_image_id) + image_service, image_id = nova.image.get_image_service(image_href) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( + req, image_service, image_id) + images = set([str(x['id']) for x in image_service.index(context)]) + assert str(image_id) in images except: - msg = _("Can not find requested image") + msg = _("Cannot find requested image %s") % image_href return faults.Fault(exc.HTTPBadRequest(msg)) - kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) + personality = body['server'].get('personality') - personality = env['server'].get('personality') injected_files = [] if personality: injected_files = self._get_injected_files(personality) - flavor_id = self._flavor_id_from_req_data(env) + flavor_id = self._flavor_id_from_req_data(body) - if not 'name' in env['server']: + if not 'name' in body['server']: msg = _("Server name is not defined") return exc.HTTPBadRequest(msg) - name = env['server']['name'] + name = body['server']['name'] self._validate_server_name(name) name = name.strip() @@ -172,39 +163,30 @@ class Controller(common.OpenstackController): (inst,) = self.compute_api.create( context, 
inst_type, - image_id, + image_href, kernel_id=kernel_id, ramdisk_id=ramdisk_id, display_name=name, display_description=name, key_name=key_name, key_data=key_data, - metadata=env['server'].get('metadata', {}), + metadata=body['server'].get('metadata', {}), injected_files=injected_files, admin_password=password) except quota.QuotaError as error: self._handle_quota_error(error) + except exception.ImageNotFound as error: + msg = _("Can not find requested image") + return faults.Fault(exc.HTTPBadRequest(msg)) inst['instance_type'] = inst_type - inst['image_id'] = requested_image_id + inst['image_ref'] = image_href builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) server['server']['adminPass'] = password return server - def _deserialize_create(self, request): - """ - Deserialize a create request - - Overrides normal behavior in the case of xml content - """ - if request.content_type == "application/xml": - deserializer = ServerCreateRequestXMLDeserializer() - return deserializer.deserialize(request.body) - else: - return self._deserialize(request.body, request.get_content_type()) - def _get_injected_files(self, personality): """ Create a list of injected files from the personality attribute @@ -254,24 +236,23 @@ class Controller(common.OpenstackController): return utils.generate_password(16) @scheduler_api.redirect_handler - def update(self, req, id): + def update(self, req, id, body): """ Updates the server name or password """ if len(req.body) == 0: raise exc.HTTPUnprocessableEntity() - inst_dict = self._deserialize(req.body, req.get_content_type()) - if not inst_dict: + if not body: return faults.Fault(exc.HTTPUnprocessableEntity()) ctxt = req.environ['nova.context'] update_dict = {} - if 'name' in inst_dict['server']: - name = inst_dict['server']['name'] + if 'name' in body['server']: + name = body['server']['name'] self._validate_server_name(name) update_dict['display_name'] = name.strip() - self._parse_update(ctxt, id, inst_dict, update_dict) + self._parse_update(ctxt, id, body, update_dict) try: self.compute_api.update(ctxt, id, **update_dict) @@ -293,7 +274,7 @@ class Controller(common.OpenstackController): pass @scheduler_api.redirect_handler - def action(self, req, id): + def action(self, req, id, body): """Multi-purpose method used to reboot, rebuild, or resize a server""" @@ -306,10 +287,9 @@ class Controller(common.OpenstackController): 'rebuild': self._action_rebuild, } - input_dict = self._deserialize(req.body, req.get_content_type()) for key in actions.keys(): - if key in input_dict: - return actions[key](input_dict, req, id) + if key in body: + return actions[key](body, req, id) return faults.Fault(exc.HTTPNotImplemented()) def _action_change_password(self, input_dict, req, id): @@ -332,19 +312,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() def _action_resize(self, input_dict, req, id): - """ Resizes a given instance to the flavor size requested """ - try: - if 'resize' in input_dict and 'flavorId' in input_dict['resize']: - flavor_id = input_dict['resize']['flavorId'] - self.compute_api.resize(req.environ['nova.context'], id, - flavor_id) - else: - LOG.exception(_("Missing arguments for resize")) - return faults.Fault(exc.HTTPUnprocessableEntity()) - except Exception, e: - LOG.exception(_("Error in resize %s"), e) - return faults.Fault(exc.HTTPBadRequest()) - return exc.HTTPAccepted() + return exc.HTTPNotImplemented() def _action_reboot(self, input_dict, req, id): if 'reboot' in input_dict and 'type' in 
input_dict['reboot']: @@ -409,7 +377,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def reset_network(self, req, id): + def reset_network(self, req, id, body): """ Reset networking on an instance (admin only). @@ -424,7 +392,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def inject_network_info(self, req, id): + def inject_network_info(self, req, id, body): """ Inject network info for an instance (admin only). @@ -439,7 +407,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def pause(self, req, id): + def pause(self, req, id, body): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] try: @@ -451,7 +419,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def unpause(self, req, id): + def unpause(self, req, id, body): """ Permit Admins to Unpause the server. """ ctxt = req.environ['nova.context'] try: @@ -463,7 +431,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def suspend(self, req, id): + def suspend(self, req, id, body): """permit admins to suspend the server""" context = req.environ['nova.context'] try: @@ -475,7 +443,7 @@ class Controller(common.OpenstackController): return exc.HTTPAccepted() @scheduler_api.redirect_handler - def resume(self, req, id): + def resume(self, req, id, body): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] try: @@ -550,17 +518,15 @@ class Controller(common.OpenstackController): error=item.error)) return dict(actions=actions) - def _get_kernel_ramdisk_from_image(self, req, image_id): + def _get_kernel_ramdisk_from_image(self, req, image_service, image_id): """Fetch an image from the ImageService, then if present, return the associated kernel and ramdisk image IDs. 
""" context = req.environ['nova.context'] - image_meta = self._image_service.show(context, image_id) + image_meta = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub - kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( - image_meta) - return kernel_id, ramdisk_id + return self._do_get_kernel_ramdisk_from_image(image_meta) @staticmethod def _do_get_kernel_ramdisk_from_image(image_meta): @@ -591,7 +557,7 @@ class Controller(common.OpenstackController): class ControllerV10(Controller): - def _image_id_from_req_data(self, data): + def _image_ref_from_req_data(self, data): return data['server']['imageId'] def _flavor_id_from_req_data(self, data): @@ -610,6 +576,21 @@ class ControllerV10(Controller): self.compute_api.set_admin_password(context, server_id, inst_dict['server']['adminPass']) + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorId' in input_dict['resize']: + flavor_id = input_dict['resize']['flavorId'] + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing 'flavorId' argument for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] instance_id = int(instance_id) @@ -634,9 +615,8 @@ class ControllerV10(Controller): class ControllerV11(Controller): - def _image_id_from_req_data(self, data): - href = data['server']['imageRef'] - return common.get_id_from_href(href) + def _image_ref_from_req_data(self, data): + return data['server']['imageRef'] def _flavor_id_from_req_data(self, data): href = data['server']['flavorRef'] @@ -695,27 +675,44 @@ class ControllerV11(Controller): LOG.info(msg) raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + def _action_resize(self, input_dict, req, id): + """ Resizes a given instance to the flavor size requested """ + try: + if 'resize' in input_dict and 'flavorRef' in input_dict['resize']: + flavor_ref = input_dict['resize']['flavorRef'] + flavor_id = common.get_id_from_href(flavor_ref) + self.compute_api.resize(req.environ['nova.context'], id, + flavor_id) + else: + LOG.exception(_("Missing 'flavorRef' argument for resize")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + except Exception, e: + LOG.exception(_("Error in resize %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + def _action_rebuild(self, info, request, instance_id): context = request.environ['nova.context'] instance_id = int(instance_id) try: - image_ref = info["rebuild"]["imageRef"] + image_href = info["rebuild"]["imageRef"] except (KeyError, TypeError): msg = _("Could not parse imageRef from request.") LOG.debug(msg) return faults.Fault(exc.HTTPBadRequest(explanation=msg)) - image_id = common.get_id_from_href(image_ref) personalities = info["rebuild"].get("personality", []) - metadata = info["rebuild"].get("metadata", {}) + metadata = info["rebuild"].get("metadata") + name = info["rebuild"].get("name") - self._validate_metadata(metadata) + if metadata: + self._validate_metadata(metadata) self._decode_personalities(personalities) try: - self.compute_api.rebuild(context, instance_id, image_id, metadata, - 
personalities) + self.compute_api.rebuild(context, instance_id, image_href, name, + metadata, personalities) except exception.BuildInProgress: msg = _("Instance %d is currently being rebuilt.") % instance_id LOG.debug(msg) @@ -735,11 +732,8 @@ class ControllerV11(Controller): raise exc.HTTPBadRequest(msg) return password - def get_default_xmlns(self, req): - return common.XML_NS_V11 - -class ServerCreateRequestXMLDeserializer(object): +class ServerXMLDeserializer(wsgi.XMLDeserializer): """ Deserializer to handle xml-formatted server create requests. @@ -747,7 +741,7 @@ class ServerCreateRequestXMLDeserializer(object): and personality attributes """ - def deserialize(self, string): + def create(self, string): """Deserialize an xml-formatted server create request""" dom = minidom.parseString(string) server = self._extract_server(dom) @@ -814,3 +808,43 @@ class ServerCreateRequestXMLDeserializer(object): if child.nodeType == child.TEXT_NODE: return child.nodeValue return "" + + +def create_resource(version='1.0'): + controller = { + '1.0': ControllerV10, + '1.1': ControllerV11, + }[version]() + + metadata = { + "attributes": { + "server": ["id", "imageId", "name", "flavorId", "hostId", + "status", "progress", "adminPass", "flavorRef", + "imageRef"], + "link": ["rel", "type", "href"], + }, + "dict_collections": { + "metadata": {"item_name": "meta", "item_key": "key"}, + }, + "list_collections": { + "public": {"item_name": "ip", "item_key": "addr"}, + "private": {"item_name": "ip", "item_key": "addr"}, + }, + } + + xmlns = { + '1.0': wsgi.XMLNS_V10, + '1.1': wsgi.XMLNS_V11, + }[version] + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, + xmlns=xmlns), + } + + deserializers = { + 'application/xml': ServerXMLDeserializer(), + } + + return wsgi.Resource(controller, serializers=serializers, + deserializers=deserializers) diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index 996db3648..4f11f8dfb 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -17,29 +17,13 @@ from webob import exc -from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi -def _translate_keys(inst): - """ Coerces a shared IP group instance into proper dictionary format """ - return dict(sharedIpGroup=inst) - - -def _translate_detail_keys(inst): - """ Coerces a shared IP group instance into proper dictionary format with - correctly mapped attributes """ - return dict(sharedIpGroups=inst) - - -class Controller(common.OpenstackController): +class Controller(object): """ The Shared IP Groups Controller for the Openstack API """ - _serialization_metadata = { - 'application/xml': { - 'attributes': { - 'sharedIpGroup': []}}} - def index(self, req): """ Returns a list of Shared IP Groups for the user """ raise faults.Fault(exc.HTTPNotImplemented()) @@ -48,7 +32,7 @@ class Controller(common.OpenstackController): """ Shows in-depth information on a specific Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def update(self, req, id): + def update(self, req, id, body): """ You can't update a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) @@ -60,6 +44,10 @@ class Controller(common.OpenstackController): """ Returns a complete list of Shared IP Groups """ raise faults.Fault(exc.HTTPNotImplemented()) - def create(self, req): + def create(self, req, body): """ Creates a new Shared IP group """ raise 
faults.Fault(exc.HTTPNotImplemented()) + + +def create_resource(): + return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 7ae4c3232..50975fc1f 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -20,8 +20,10 @@ from nova import flags from nova import log as logging from nova.api.openstack import common from nova.api.openstack import faults +from nova.api.openstack import wsgi from nova.auth import manager + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.openstack') @@ -34,12 +36,7 @@ def _translate_keys(user): admin=user.admin) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "user": ["id", "name", "access", "secret", "admin"]}}} +class Controller(object): def __init__(self): self.manager = manager.AuthManager() @@ -81,23 +78,35 @@ class Controller(common.OpenstackController): self.manager.delete_user(id) return {} - def create(self, req): + def create(self, req, body): self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - is_admin = env['user'].get('admin') in ('T', 'True', True) - name = env['user'].get('name') - access = env['user'].get('access') - secret = env['user'].get('secret') + is_admin = body['user'].get('admin') in ('T', 'True', True) + name = body['user'].get('name') + access = body['user'].get('access') + secret = body['user'].get('secret') user = self.manager.create_user(name, access, secret, is_admin) return dict(user=_translate_keys(user)) - def update(self, req, id): + def update(self, req, id, body): self._check_admin(req.environ['nova.context']) - env = self._deserialize(req.body, req.get_content_type()) - is_admin = env['user'].get('admin') + is_admin = body['user'].get('admin') if is_admin is not None: is_admin = is_admin in ('T', 'True', True) - access = env['user'].get('access') - secret = env['user'].get('secret') + access = body['user'].get('access') + secret = body['user'].get('secret') self.manager.modify_user(id, access, secret, is_admin) return dict(user=_translate_keys(self.manager.get_user(id))) + + +def create_resource(): + metadata = { + "attributes": { + "user": ["id", "name", "access", "secret", "admin"], + }, + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index 3f9d91934..9db160102 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -18,13 +18,26 @@ import webob import webob.dec -from nova import wsgi import nova.api.openstack.views.versions +from nova.api.openstack import wsgi -class Versions(wsgi.Application): - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): +class Versions(wsgi.Resource): + def __init__(self): + metadata = { + "attributes": { + "version": ["status", "id"], + "link": ["rel", "href"], + } + } + + serializers = { + 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), + } + + super(Versions, self).__init__(None, serializers=serializers) + + def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" version_objs = [ { @@ -37,24 +50,6 @@ class Versions(wsgi.Application): }, ] - builder = nova.api.openstack.views.versions.get_view_builder(req) + builder = nova.api.openstack.views.versions.get_view_builder(request) versions = 
[builder.build(version) for version in version_objs] - response = dict(versions=versions) - - metadata = { - "application/xml": { - "attributes": { - "version": ["status", "id"], - "link": ["rel", "href"], - } - } - } - - content_type = req.best_match_content_type() - body = wsgi.Serializer(metadata).serialize(response, content_type) - - response = webob.Response() - response.content_type = content_type - response.body = body - - return response + return dict(versions=versions) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 0be468edc..b2352e3fd 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -18,6 +18,7 @@ import hashlib import os +from nova import exception from nova.compute import power_state import nova.compute import nova.context @@ -112,8 +113,11 @@ class ViewBuilderV10(ViewBuilder): """Model an Openstack API V1.0 server response.""" def _build_image(self, response, inst): - if 'image_id' in dict(inst): - response['imageId'] = inst['image_id'] + if 'image_ref' in dict(inst): + image_ref = inst['image_ref'] + if str(image_ref).startswith('http'): + raise exception.ListingImageRefsNotSupported() + response['imageId'] = int(image_ref) def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): @@ -130,9 +134,11 @@ class ViewBuilderV11(ViewBuilder): self.base_url = base_url def _build_image(self, response, inst): - if "image_id" in dict(inst): - image_id = inst.get("image_id") - response["imageRef"] = self.image_builder.generate_href(image_id) + if 'image_ref' in dict(inst): + image_href = inst['image_ref'] + if str(image_href).isdigit(): + image_href = int(image_href) + response['imageRef'] = image_href def _build_flavor(self, response, inst): if "instance_type" in dict(inst): diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py new file mode 100644 index 000000000..ddf4e6fa9 --- /dev/null +++ b/nova/api/openstack/wsgi.py @@ -0,0 +1,380 @@ + +import json +import webob +from xml.dom import minidom + +from nova import exception +from nova import log as logging +from nova import utils +from nova import wsgi + + +XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' +XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1' + +LOG = logging.getLogger('nova.api.openstack.wsgi') + + +class Request(webob.Request): + """Add some Openstack API-specific logic to the base webob.Request.""" + + def best_match_content_type(self): + """Determine the requested response content-type. + + Based on the query extension then the Accept header. + + """ + supported = ('application/json', 'application/xml') + + parts = self.path.rsplit('.', 1) + if len(parts) > 1: + ctype = 'application/{0}'.format(parts[1]) + if ctype in supported: + return ctype + + bm = self.accept.best_match(supported) + + # default to application/json if we don't find a preference + return bm or 'application/json' + + def get_content_type(self): + """Determine content type of the request body. 
+ + Does not do any body introspection, only checks header + + """ + if not "Content-Type" in self.headers: + raise exception.InvalidContentType(content_type=None) + + allowed_types = ("application/xml", "application/json") + content_type = self.content_type + + if content_type not in allowed_types: + raise exception.InvalidContentType(content_type=content_type) + else: + return content_type + + +class TextDeserializer(object): + """Custom request body deserialization based on controller action name.""" + + def deserialize(self, datastring, action='default'): + """Find local deserialization method and parse request body.""" + action_method = getattr(self, action, self.default) + return action_method(datastring) + + def default(self, datastring): + """Default deserialization code should live here""" + raise NotImplementedError() + + +class JSONDeserializer(TextDeserializer): + + def default(self, datastring): + return utils.loads(datastring) + + +class XMLDeserializer(TextDeserializer): + + def __init__(self, metadata=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + """ + super(XMLDeserializer, self).__init__() + self.metadata = metadata or {} + + def default(self, datastring): + plurals = set(self.metadata.get('plurals', {})) + node = minidom.parseString(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + + def _from_xml_node(self, node, listnames): + """Convert a minidom node to a simple Python type. + + :param listnames: list of XML node names whose subnodes should + be considered list items. + + """ + if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: + return node.childNodes[0].nodeValue + elif node.nodeName in listnames: + return [self._from_xml_node(n, listnames) for n in node.childNodes] + else: + result = dict() + for attr in node.attributes.keys(): + result[attr] = node.attributes[attr].nodeValue + for child in node.childNodes: + if child.nodeType != node.TEXT_NODE: + result[child.nodeName] = self._from_xml_node(child, + listnames) + return result + + +class RequestDeserializer(object): + """Break up a Request object into more useful pieces.""" + + def __init__(self, deserializers=None): + """ + :param deserializers: dictionary of content-type-specific deserializers + + """ + self.deserializers = { + 'application/xml': XMLDeserializer(), + 'application/json': JSONDeserializer(), + } + + self.deserializers.update(deserializers or {}) + + def deserialize(self, request): + """Extract necessary pieces of the request. 
+ + :param request: Request object + :returns tuple of expected controller action name, dictionary of + keyword arguments to pass to the controller, the expected + content type of the response + + """ + action_args = self.get_action_args(request.environ) + action = action_args.pop('action', None) + + if request.method.lower() in ('post', 'put'): + if len(request.body) == 0: + action_args['body'] = None + else: + content_type = request.get_content_type() + deserializer = self.get_deserializer(content_type) + + try: + body = deserializer.deserialize(request.body, action) + action_args['body'] = body + except exception.InvalidContentType: + action_args['body'] = None + + accept = self.get_expected_content_type(request) + + return (action, action_args, accept) + + def get_deserializer(self, content_type): + try: + return self.deserializers[content_type] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + def get_expected_content_type(self, request): + return request.best_match_content_type() + + def get_action_args(self, request_environment): + """Parse dictionary created by routes library.""" + try: + args = request_environment['wsgiorg.routing_args'][1].copy() + except Exception: + return {} + + try: + del args['controller'] + except KeyError: + pass + + try: + del args['format'] + except KeyError: + pass + + return args + + +class DictSerializer(object): + """Custom response body serialization based on controller action name.""" + + def serialize(self, data, action='default'): + """Find local serialization method and encode response body.""" + action_method = getattr(self, action, self.default) + return action_method(data) + + def default(self, data): + """Default serialization code should live here""" + raise NotImplementedError() + + +class JSONDictSerializer(DictSerializer): + + def default(self, data): + return utils.dumps(data) + + +class XMLDictSerializer(DictSerializer): + + def __init__(self, metadata=None, xmlns=None): + """ + :param metadata: information needed to deserialize xml into + a dictionary. + :param xmlns: XML namespace to include with serialized xml + """ + super(XMLDictSerializer, self).__init__() + self.metadata = metadata or {} + self.xmlns = xmlns + + def default(self, data): + # We expect data to contain a single key which is the XML root. 
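# Editor's note (illustrative, not a line of this patch): given the metadata
# wired up by the servers create_resource() earlier in this diff -- where
# 'id', 'name' and 'status' are listed as attributes of 'server' -- a dict
# like {'server': {'id': 1, 'name': 'vm1', 'status': 'ACTIVE'}} serializes to
# a single <server id="1" name="vm1" status="ACTIVE"/> root node; keys not
# declared as attributes become child elements instead.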
+ root_key = data.keys()[0] + doc = minidom.Document() + node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) + + xmlns = node.getAttribute('xmlns') + if not xmlns and self.xmlns: + node.setAttribute('xmlns', self.xmlns) + + return node.toprettyxml(indent=' ') + + def _to_xml_node(self, doc, metadata, nodename, data): + """Recursive method to convert data members to XML nodes.""" + result = doc.createElement(nodename) + + # Set the xml namespace if one is specified + # TODO(justinsb): We could also use prefixes on the keys + xmlns = metadata.get('xmlns', None) + if xmlns: + result.setAttribute('xmlns', xmlns) + + #TODO(bcwaldon): accomplish this without a type-check + if type(data) is list: + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + #TODO(bcwaldon): accomplish this without a type-check + elif type(data) is dict: + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + return result + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k, v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: + # Type is atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result + + +class ResponseSerializer(object): + """Encode the necessary pieces into a response object""" + + def __init__(self, serializers=None): + """ + :param serializers: dictionary of content-type-specific serializers + + """ + self.serializers = { + 'application/xml': XMLDictSerializer(), + 'application/json': JSONDictSerializer(), + } + self.serializers.update(serializers or {}) + + def serialize(self, response_data, content_type): + """Serialize a dict into a string and wrap in a wsgi.Request object. + + :param response_data: dict produced by the Controller + :param content_type: expected mimetype of serialized response body + + """ + response = webob.Response() + response.headers['Content-Type'] = content_type + + serializer = self.get_serializer(content_type) + response.body = serializer.serialize(response_data) + + return response + + def get_serializer(self, content_type): + try: + return self.serializers[content_type] + except (KeyError, TypeError): + raise exception.InvalidContentType(content_type=content_type) + + +class Resource(wsgi.Application): + """WSGI app that handles (de)serialization and controller dispatch. + + WSGI app that reads routing information supplied by RoutesMiddleware + and calls the requested action method upon its controller. All + controller action methods must accept a 'req' argument, which is the + incoming wsgi.Request. If the operation is a PUT or POST, the controller + method must also accept a 'body' argument (the deserialized request body). 
+ They may raise a webob.exc exception or return a dict, which will be + serialized by requested content type. + + """ + def __init__(self, controller, serializers=None, deserializers=None): + """ + :param controller: object that implement methods created by routes lib + :param serializers: dict of content-type specific text serializers + :param deserializers: dict of content-type specific text deserializers + + """ + self.controller = controller + self.serializer = ResponseSerializer(serializers) + self.deserializer = RequestDeserializer(deserializers) + + @webob.dec.wsgify(RequestClass=Request) + def __call__(self, request): + """WSGI method that controls (de)serialization and method dispatch.""" + + LOG.debug("%(method)s %(url)s" % {"method": request.method, + "url": request.url}) + + try: + action, action_args, accept = self.deserializer.deserialize( + request) + except exception.InvalidContentType: + return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) + + action_result = self.dispatch(request, action, action_args) + + #TODO(bcwaldon): find a more elegant way to pass through non-dict types + if type(action_result) is dict: + response = self.serializer.serialize(action_result, accept) + else: + response = action_result + + try: + msg_dict = dict(url=request.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + except AttributeError: + msg_dict = dict(url=request.url) + msg = _("%(url)s returned a fault") + + LOG.debug(msg) + + return response + + def dispatch(self, request, action, action_args): + """Find action-spefic method on controller and call it.""" + + controller_method = getattr(self.controller, action) + return controller_method(req=request, **action_args) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index af73d8f6d..8061b3b67 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -22,6 +22,7 @@ from nova import exception from nova import flags from nova import log as logging from nova.api.openstack import common +from nova.api.openstack import wsgi from nova.scheduler import api @@ -52,12 +53,7 @@ def _scrub_zone(zone): 'deleted', 'deleted_at', 'updated_at')) -class Controller(common.OpenstackController): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "zone": ["id", "api_url", "name", "capabilities"]}}} +class Controller(object): def index(self, req): """Return all zones in brief""" @@ -96,17 +92,15 @@ class Controller(common.OpenstackController): api.zone_delete(req.environ['nova.context'], zone_id) return {} - def create(self, req): + def create(self, req, body): context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) - zone = api.zone_create(context, env["zone"]) + zone = api.zone_create(context, body["zone"]) return dict(zone=_scrub_zone(zone)) - def update(self, req, id): + def update(self, req, id, body): context = req.environ['nova.context'] - env = self._deserialize(req.body, req.get_content_type()) zone_id = int(id) - zone = api.zone_update(context, zone_id, env["zone"]) + zone = api.zone_update(context, zone_id, body["zone"]) return dict(zone=_scrub_zone(zone)) def select(self, req): @@ -140,3 +134,18 @@ class Controller(common.OpenstackController): cooked.append(dict(weight=entry['weight'], blob=cipher_text)) return cooked + + +def create_resource(): + metadata = { + "attributes": { + "zone": ["id", "api_url", "name", "capabilities"], + }, + } + + serializers = { + 'application/xml': 
wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, + metadata=metadata), + } + + return wsgi.Resource(Controller(), serializers=serializers) diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 3f8432851..183f7a985 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,6 +24,7 @@ other backends by creating another class that exposes the same public methods. """ +import functools import sys from nova import exception @@ -68,6 +69,12 @@ flags.DEFINE_string('ldap_developer', LOG = logging.getLogger("nova.ldapdriver") +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. I'm delaying # creating this now because I'm expecting an auth refactor @@ -85,6 +92,7 @@ def _clean(attr): def sanitize(fn): """Decorator to sanitize all args""" + @functools.wraps(fn) def _wrapped(self, *args, **kwargs): args = [_clean(x) for x in args] kwargs = dict((k, _clean(v)) for (k, v) in kwargs) @@ -103,29 +111,56 @@ class LdapDriver(object): isadmin_attribute = 'isNovaAdmin' project_attribute = 'owner' project_objectclass = 'groupOfNames' + conn = None + mc = None def __init__(self): """Imports the LDAP module""" self.ldap = __import__('ldap') - self.conn = None if FLAGS.ldap_schema_version == 1: LdapDriver.project_pattern = '(objectclass=novaProject)' LdapDriver.isadmin_attribute = 'isAdmin' LdapDriver.project_attribute = 'projectManager' LdapDriver.project_objectclass = 'novaProject' + self.__cache = None + if LdapDriver.conn is None: + LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) + LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, + FLAGS.ldap_password) + if LdapDriver.mc is None: + LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def __enter__(self): - """Creates the connection to LDAP""" - self.conn = self.ldap.initialize(FLAGS.ldap_url) - self.conn.simple_bind_s(FLAGS.ldap_user_dn, FLAGS.ldap_password) + # TODO(yorik-sar): Should be per-request cache, not per-driver-request + self.__cache = {} return self def __exit__(self, exc_type, exc_value, traceback): - """Destroys the connection to LDAP""" - self.conn.unbind_s() + self.__cache = None return False + def __local_cache(key_fmt): + """Wrap function to cache it's result in self.__cache. + Works only with functions with one fixed argument. 
+ """ + def do_wrap(fn): + @functools.wraps(fn) + def inner(self, arg, **kwargs): + cache_key = key_fmt % (arg,) + try: + res = self.__cache[cache_key] + LOG.debug('Local cache hit for %s by key %s' % + (fn.__name__, cache_key)) + return res + except KeyError: + res = fn(self, arg, **kwargs) + self.__cache[cache_key] = res + return res + return inner + return do_wrap + @sanitize + @__local_cache('uid_user-%s') def get_user(self, uid): """Retrieve user by id""" attr = self.__get_ldap_user(uid) @@ -134,15 +169,31 @@ class LdapDriver(object): @sanitize def get_user_from_access_key(self, access): """Retrieve user by access key""" + cache_key = 'uak_dn_%s' % (access,) + user_dn = self.mc.get(cache_key) + if user_dn: + user = self.__to_user( + self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE)) + if user: + if user['access'] == access: + return user + else: + self.mc.set(cache_key, None) query = '(accessKey=%s)' % access dn = FLAGS.ldap_user_subtree - return self.__to_user(self.__find_object(dn, query)) + user_obj = self.__find_object(dn, query) + user = self.__to_user(user_obj) + if user: + self.mc.set(cache_key, user_obj['dn'][0]) + return user @sanitize + @__local_cache('pid_project-%s') def get_project(self, pid): """Retrieve project by id""" - dn = self.__project_to_dn(pid) - attr = self.__find_object(dn, LdapDriver.project_pattern) + dn = self.__project_to_dn(pid, search=False) + attr = self.__find_object(dn, LdapDriver.project_pattern, + scope=self.ldap.SCOPE_BASE) return self.__to_project(attr) @sanitize @@ -395,6 +446,7 @@ class LdapDriver(object): """Check if project exists""" return self.get_project(project_id) is not None + @__local_cache('uid_attrs-%s') def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" dn = FLAGS.ldap_user_subtree @@ -426,12 +478,20 @@ class LdapDriver(object): if scope is None: # One of the flags is 0! 
scope = self.ldap.SCOPE_SUBTREE + if query is None: + query = "(objectClass=*)" try: res = self.conn.search_s(dn, scope, query) except self.ldap.NO_SUCH_OBJECT: return [] # Just return the attributes - return [attributes for dn, attributes in res] + # FIXME(yorik-sar): Whole driver should be refactored to + # prevent this hack + res1 = [] + for dn, attrs in res: + attrs['dn'] = [dn] + res1.append(attrs) + return res1 def __find_role_dns(self, tree): """Find dns of role objects in given tree""" @@ -564,6 +624,7 @@ class LdapDriver(object): 'description': attr.get('description', [None])[0], 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + @__local_cache('uid_dn-%s') def __uid_to_dn(self, uid, search=True): """Convert uid to dn""" # By default return a generated DN @@ -576,6 +637,7 @@ class LdapDriver(object): userdn = user[0] return userdn + @__local_cache('pid_dn-%s') def __project_to_dn(self, pid, search=True): """Convert pid to dn""" # By default return a generated DN @@ -603,16 +665,18 @@ class LdapDriver(object): else: return None + @__local_cache('dn_uid-%s') def __dn_to_uid(self, dn): """Convert user dn to uid""" query = '(objectclass=novaUser)' - user = self.__find_object(dn, query) + user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE) return user[FLAGS.ldap_user_id_attribute][0] class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" - def __init__(self): # pylint: disable=W0231 - __import__('nova.auth.fakeldap') - self.ldap = sys.modules['nova.auth.fakeldap'] + def __init__(self): + import nova.auth.fakeldap + sys.modules['ldap'] = nova.auth.fakeldap + super(FakeLdapDriver, self).__init__() diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 07235a2a7..98c7dd263 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -73,6 +73,12 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', LOG = logging.getLogger('nova.auth.manager') +if FLAGS.memcached_servers: + import memcache +else: + from nova import fakememcache as memcache + + class AuthBase(object): """Base class for objects relating to auth @@ -206,6 +212,7 @@ class AuthManager(object): """ _instance = None + mc = None def __new__(cls, *args, **kwargs): """Returns the AuthManager singleton""" @@ -222,13 +229,8 @@ class AuthManager(object): self.network_manager = utils.import_object(FLAGS.network_manager) if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) - - if FLAGS.memcached_servers: - import memcache - else: - from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.memcached_servers, - debug=0) + if AuthManager.mc is None: + AuthManager.mc = memcache.Client(FLAGS.memcached_servers, debug=0) def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 8170fcafe..eba3a8537 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -1,4 +1,6 @@ -NOVA_KEY_DIR=$(dirname $(readlink -f ${BASH_SOURCE})) +NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || + NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}") +NOVA_KEY_DIR=${NOVARC%%/*} export EC2_ACCESS_KEY="%(access)s:%(project)s" export EC2_SECRET_KEY="%(secret)s" export EC2_URL="%(ec2)s" diff --git a/nova/compute/api.py b/nova/compute/api.py index 4f2363387..4f327fab1 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -18,7 +18,6 @@ 
"""Handles all requests relating to instances (guest vms).""" -import datetime import eventlet import re import time @@ -26,6 +25,7 @@ import time from nova import db from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import network from nova import quota @@ -58,9 +58,9 @@ class API(base.Base): def __init__(self, image_service=None, network_api=None, volume_api=None, hostname_factory=generate_default_hostname, **kwargs): - if not image_service: - image_service = utils.import_object(FLAGS.image_service) - self.image_service = image_service + self.image_service = image_service or \ + nova.image.get_default_image_service() + if not network_api: network_api = network.API() self.network_api = network_api @@ -91,7 +91,6 @@ class API(base.Base): """Enforce quota limits on injected files. Raises a QuotaError if any limit is exceeded. - """ if injected_files is None: return @@ -130,7 +129,7 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, + image_href, kernel_id=None, ramdisk_id=None, min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', @@ -140,7 +139,6 @@ class API(base.Base): """Create the number and type of instances requested. Verifies that quota and other arguments are valid. - """ if not instance_type: instance_type = instance_types.get_default_instance_type() @@ -162,7 +160,8 @@ class API(base.Base): self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) - image = self.image_service.show(context, image_id) + (image_service, image_id) = nova.image.get_image_service(image_href) + image = image_service.show(context, image_id) os_type = None if 'properties' in image and 'os_type' in image['properties']: @@ -182,9 +181,9 @@ class API(base.Base): logging.debug("Using Kernel=%s, Ramdisk=%s" % (kernel_id, ramdisk_id)) if kernel_id: - self.image_service.show(context, kernel_id) + image_service.show(context, kernel_id) if ramdisk_id: - self.image_service.show(context, ramdisk_id) + image_service.show(context, ramdisk_id) if security_group is None: security_group = ['default'] @@ -205,7 +204,7 @@ class API(base.Base): base_options = { 'reservation_id': utils.generate_uid('r'), - 'image_id': image_id, + 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'state': 0, @@ -268,10 +267,17 @@ class API(base.Base): {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id, - "instance_type": instance_type, + "request_spec": { + 'instance_type': instance_type, + 'filter': + 'nova.scheduler.host_filter.' + 'InstanceTypeFilter', + }, "availability_zone": availability_zone, "injected_files": injected_files, - "admin_password": admin_password}}) + "admin_password": admin_password, + }, + }) for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -294,7 +300,6 @@ class API(base.Base): already exist. :param context: the security context - """ try: db.security_group_get_by_name(context, context.project_id, @@ -327,7 +332,6 @@ class API(base.Base): Sends an update request to each compute node for whom this is relevant. - """ # First, we get the security group rules that reference this group as # the grantee.. 
@@ -374,7 +378,6 @@ class API(base.Base): updated :returns: None - """ rv = self.db.instance_update(context, instance_id, kwargs) return dict(rv.iteritems()) @@ -403,7 +406,7 @@ class API(base.Base): instance['id'], state_description='terminating', state=0, - terminated_at=datetime.datetime.utcnow()) + terminated_at=utils.utcnow()) host = instance['host'] if host: @@ -424,7 +427,6 @@ class API(base.Base): Use this method instead of get() if this is the only operation you intend to to. It will route to novaclient.get if the instance is not found. - """ return self.get(context, instance_id) @@ -434,7 +436,6 @@ class API(base.Base): If there is no filter and the context is an admin, it will retreive all instances in the system. - """ if reservation_id is not None: return self.db.instance_get_all_by_reservation( @@ -464,7 +465,6 @@ class API(base.Base): compute worker :returns: None - """ if not params: params = {} @@ -514,12 +514,12 @@ class API(base.Base): """Snapshot the given instance. :returns: A dict containing image metadata - """ properties = {'instance_id': str(instance_id), - 'user_id': str(context.user_id)} + 'user_id': str(context.user_id), + 'image_state': 'creating'} sent_meta = {'name': name, 'is_public': False, - 'properties': properties} + 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, @@ -530,8 +530,8 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) - def rebuild(self, context, instance_id, image_id, metadata=None, - files_to_inject=None): + def rebuild(self, context, instance_id, image_href, name=None, + metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) @@ -539,16 +539,19 @@ class API(base.Base): msg = _("Instance already building") raise exception.BuildInProgress(msg) - metadata = metadata or {} - self._check_metadata_properties_quota(context, metadata) - files_to_inject = files_to_inject or [] self._check_injected_file_quota(context, files_to_inject) - self.db.instance_update(context, instance_id, {"metadata": metadata}) + values = {} + if metadata is not None: + self._check_metadata_properties_quota(context, metadata) + values['metadata'] = metadata + if name is not None: + values['display_name'] = name + self.db.instance_update(context, instance_id, values) rebuild_params = { - "image_id": image_id, + "image_ref": image_href, "injected_files": files_to_inject, } diff --git a/nova/compute/manager.py b/nova/compute/manager.py index d1e01f275..245958de7 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,7 +35,6 @@ terminating it. 
""" -import datetime import os import socket import sys @@ -159,12 +158,12 @@ class ComputeManager(manager.SchedulerDependentManager): def _update_launched_at(self, context, instance_id, launched_at=None): """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or datetime.datetime.utcnow()} + data = {'launched_at': launched_at or utils.utcnow()} self.db.instance_update(context, instance_id, data) - def _update_image_id(self, context, instance_id, image_id): + def _update_image_ref(self, context, instance_id, image_ref): """Update the image_id for the given instance.""" - data = {'image_id': image_id} + data = {'image_ref': image_ref} self.db.instance_update(context, instance_id, data) def get_console_topic(self, context, **kwargs): @@ -235,7 +234,7 @@ class ComputeManager(manager.SchedulerDependentManager): power_state.NOSTATE, 'networking') - is_vpn = instance_ref['image_id'] == str(FLAGS.vpn_image_id) + is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id) # NOTE(vish): This could be a cast because we don't do anything # with the address currently, but I'm leaving it as # a call to ensure that network setup completes. We @@ -331,7 +330,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock - def rebuild_instance(self, context, instance_id, image_id): + def rebuild_instance(self, context, instance_id, **kwargs): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and @@ -339,7 +338,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: `nova.RequestContext` object :param instance_id: Instance identifier (integer) - :param image_id: Image identifier (integer) + :param image_ref: Image identifier (href or integer) """ context = context.elevated() @@ -349,10 +348,12 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id, power_state.BUILDING) self.driver.destroy(instance_ref) - instance_ref.image_id = image_id + image_ref = kwargs.get('image_ref') + instance_ref.image_ref = image_ref + instance_ref.injected_files = kwargs.get('injected_files', []) self.driver.spawn(instance_ref) - self._update_image_id(context, instance_id, image_id) + self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 3bb54a382..613734bef 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -86,7 +86,7 @@ RRD_VALUES = { ]} -utcnow = datetime.datetime.utcnow +utcnow = utils.utcnow LOG = logging.getLogger('nova.compute.monitor') diff --git a/nova/context.py b/nova/context.py index c113f7ea7..99085ed75 100644 --- a/nova/context.py +++ b/nova/context.py @@ -18,7 +18,6 @@ """RequestContext: context for requests that persist through all of nova.""" -import datetime import random from nova import exception diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d53b76f4a..56739e9db 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -19,7 +19,6 @@ Implementation of SQLAlchemy backend. 
""" -import datetime import warnings from nova import db @@ -674,7 +673,7 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): filter_by(allocated=0).\ update({'instance_id': None, 'leased': 0, - 'updated_at': datetime.datetime.utcnow()}, + 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @@ -771,6 +770,15 @@ def fixed_ip_update(context, address, values): ################### +def _metadata_refs(metadata_dict): + metadata_refs = [] + if metadata_dict: + for k, v in metadata_dict.iteritems(): + metadata_ref = models.InstanceMetadata() + metadata_ref['key'] = k + metadata_ref['value'] = v + metadata_refs.append(metadata_ref) + return metadata_refs @require_context @@ -780,15 +788,7 @@ def instance_create(context, values): context - request context object values - dict containing column values. """ - metadata = values.get('metadata') - metadata_refs = [] - if metadata: - for k, v in metadata.iteritems(): - metadata_ref = models.InstanceMetadata() - metadata_ref['key'] = k - metadata_ref['value'] = v - metadata_refs.append(metadata_ref) - values['metadata'] = metadata_refs + values['metadata'] = _metadata_refs(values.get('metadata')) instance_ref = models.Instance() instance_ref.update(values) @@ -819,17 +819,17 @@ def instance_destroy(context, instance_id): session.query(models.Instance).\ filter_by(id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -955,7 +955,7 @@ def instance_get_project_vpn(context, project_id): options(joinedload('security_groups')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ - filter_by(image_id=str(FLAGS.vpn_image_id)).\ + filter_by(image_ref=str(FLAGS.vpn_image_id)).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -1010,6 +1010,11 @@ def instance_set_state(context, instance_id, state, description=None): @require_context def instance_update(context, instance_id, values): session = get_session() + metadata = values.get('metadata') + if metadata is not None: + instance_metadata_delete_all(context, instance_id) + instance_metadata_update_or_create(context, instance_id, + values.pop('metadata')) with session.begin(): instance_ref = instance_get(context, instance_id, session=session) instance_ref.update(values) @@ -1117,7 +1122,7 @@ def key_pair_destroy_all_by_user(context, user_id): session.query(models.KeyPair).\ filter_by(user_id=user_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1649,7 +1654,7 @@ def volume_destroy(context, volume_id): session.query(models.Volume).\ filter_by(id=volume_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ @@ -1807,7 +1812,7 @@ def snapshot_destroy(context, snapshot_id): session.query(models.Snapshot).\ 
filter_by(id=snapshot_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1962,17 +1967,17 @@ def security_group_destroy(context, security_group_id): session.query(models.SecurityGroup).\ filter_by(id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -1983,11 +1988,11 @@ def security_group_destroy_all(context, session=None): with session.begin(): session.query(models.SecurityGroup).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2621,7 +2626,18 @@ def instance_metadata_delete(context, instance_id, key): filter_by(key=key).\ filter_by(deleted=False).\ update({'deleted': True, - 'deleted_at': datetime.datetime.utcnow(), + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def instance_metadata_delete_all(context, instance_id): + session = get_session() + session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2644,6 +2660,9 @@ def instance_metadata_get_item(context, instance_id, key): @require_context def instance_metadata_update_or_create(context, instance_id, metadata): session = get_session() + + original_metadata = instance_metadata_get(context, instance_id) + meta_ref = None for key, value in metadata.iteritems(): try: @@ -2655,4 +2674,5 @@ def instance_metadata_update_or_create(context, instance_id, metadata): "instance_id": instance_id, "deleted": 0}) meta_ref.save(session=session) + return metadata diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py index a2d8192ca..a4fe3e482 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py @@ -17,7 +17,7 @@ from sqlalchemy import Boolean, Column, DateTime, Integer from sqlalchemy import MetaData, String, Table -import datetime +from nova import utils meta = MetaData() @@ -35,9 +35,9 @@ def old_style_quotas_table(name): return Table(name, meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', @@ -57,9 +57,9 @@ def new_style_quotas_table(name): return Table(name, 
meta, Column('id', Integer(), primary_key=True), Column('created_at', DateTime(), - default=datetime.datetime.utcnow), + default=utils.utcnow), Column('updated_at', DateTime(), - onupdate=datetime.datetime.utcnow), + onupdate=utils.utcnow), Column('deleted_at', DateTime()), Column('deleted', Boolean(), default=False), Column('project_id', @@ -160,7 +160,7 @@ def convert_backward(migrate_engine, old_quotas, new_quotas): 'project_id': quota.project_id, 'created_at': quota.created_at, 'updated_at': quota.updated_at, - quota.resource: quota.hard_limit + quota.resource: quota.hard_limit, } else: quotas[quota.project_id]['created_at'] = earliest( diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py index a169afb40..73c76f666 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -14,23 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import Column, Integer, MetaData, String, Table -#from nova import log as logging +from sqlalchemy import MetaData, Table meta = MetaData() -c_manageent = Column('server_manageent_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - -c_management = Column('server_management_url', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - def upgrade(migrate_engine): # Upgrade operations go here. Don't create your own engine; @@ -40,11 +27,8 @@ def upgrade(migrate_engine): tokens = Table('auth_tokens', meta, autoload=True, autoload_with=migrate_engine) - tokens.create_column(c_management) - migrate_engine.execute(tokens.update() - .values(server_management_url=tokens.c.server_manageent_url)) - - tokens.c.server_manageent_url.drop() + c_manageent = tokens.c.server_manageent_url + c_manageent.alter(name='server_management_url') def downgrade(migrate_engine): @@ -53,8 +37,5 @@ def downgrade(migrate_engine): tokens = Table('auth_tokens', meta, autoload=True, autoload_with=migrate_engine) - tokens.create_column(c_manageent) - migrate_engine.execute(tokens.update() - .values(server_manageent_url=tokens.c.server_management_url)) - - tokens.c.server_management_url.drop() + c_management = tokens.c.server_management_url + c_management.alter(name='server_manageent_url') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py new file mode 100644 index 000000000..10bd9d5c9 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/020_add_snapshot_id_to_volumes.py @@ -0,0 +1,47 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 MORITA Kazutaka. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Table, MetaData, Integer + +from nova import log as logging + + +meta = MetaData() + + +# Table stub-definitions +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +# +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Column +# + +snapshot_id = Column('snapshot_id', Integer()) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + # Add columns to existing tables + volumes.create_column(snapshot_id) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py new file mode 100644 index 000000000..73a5e8477 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/021_rename_image_ids.py @@ -0,0 +1,40 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + + +meta = MetaData() + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_id_column = instances.c.image_id + image_id_column.alter(name='image_ref') + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + image_ref_column = instances.c.image_ref + image_ref_column.alter(name='image_id') diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 480f62399..55efe6886 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -19,8 +19,6 @@ SQLAlchemy models for nova data. 
""" -import datetime - from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema from sqlalchemy import ForeignKey, DateTime, Boolean, Text @@ -33,6 +31,7 @@ from nova.db.sqlalchemy.session import get_session from nova import auth from nova import exception from nova import flags +from nova import utils FLAGS = flags.FLAGS @@ -43,8 +42,8 @@ class NovaBase(object): """Base class for Nova Models.""" __table_args__ = {'mysql_engine': 'InnoDB'} __table_initialized__ = False - created_at = Column(DateTime, default=datetime.datetime.utcnow) - updated_at = Column(DateTime, onupdate=datetime.datetime.utcnow) + created_at = Column(DateTime, default=utils.utcnow) + updated_at = Column(DateTime, onupdate=utils.utcnow) deleted_at = Column(DateTime) deleted = Column(Boolean, default=False) @@ -64,7 +63,7 @@ class NovaBase(object): def delete(self, session=None): """Delete this object.""" self.deleted = True - self.deleted_at = datetime.datetime.utcnow() + self.deleted_at = utils.utcnow() self.save(session=session) def __setitem__(self, key, value): @@ -184,11 +183,11 @@ class Instance(BASE, NovaBase): def project(self): return auth.manager.AuthManager().get_project(self.project_id) - image_id = Column(String(255)) + image_ref = Column(String(255)) kernel_id = Column(String(255)) ramdisk_id = Column(String(255)) -# image_id = Column(Integer, ForeignKey('images.id'), nullable=True) +# image_ref = Column(Integer, ForeignKey('images.id'), nullable=True) # kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True) # ramdisk = relationship(Ramdisk, backref=backref('instances', order_by=id)) @@ -287,6 +286,8 @@ class Volume(BASE, NovaBase): user_id = Column(String(255)) project_id = Column(String(255)) + snapshot_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) size = Column(Integer) availability_zone = Column(String(255)) # TODO(vish): foreign key? diff --git a/nova/exception.py b/nova/exception.py index 02c65fd64..69b3e0359 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -65,7 +65,7 @@ class BuildInProgress(Error): class DBError(Error): """Wraps an implementation specific exception.""" - def __init__(self, inner_exception): + def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) @@ -122,7 +122,7 @@ class NotAuthorized(NovaException): message = _("Not authorized.") def __init__(self, *args, **kwargs): - super(NotFound, self).__init__(**kwargs) + super(NotAuthorized, self).__init__(**kwargs) class AdminRequired(NotAuthorized): @@ -291,6 +291,15 @@ class DiskNotFound(NotFound): message = _("No disk at %(location)s") +class InvalidImageRef(Invalid): + message = _("Invalid image href %(image_href)s.") + + +class ListingImageRefsNotSupported(Invalid): + message = _("Some images have been stored via hrefs." 
+ + " This version of the api does not support displaying image hrefs.") + + class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") @@ -473,11 +482,19 @@ class ZoneNotFound(NotFound): message = _("Zone %(zone_id)s could not be found.") -class SchedulerHostFilterDriverNotFound(NotFound): - message = _("Scheduler Host Filter Driver %(driver_name)s could" +class SchedulerHostFilterNotFound(NotFound): + message = _("Scheduler Host Filter %(filter_name)s could not be found.") + + +class SchedulerCostFunctionNotFound(NotFound): + message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") +class SchedulerWeightFlagNotFound(NotFound): + message = _("Scheduler weight flag not found: %(flag_name)s") + + class InstanceMetadataNotFound(NotFound): message = _("Instance %(instance_id)s has no metadata with " "key %(metadata_key)s.") diff --git a/nova/flags.py b/nova/flags.py index 9eaac5596..d5090edba 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -296,6 +296,7 @@ DEFINE_bool('fake_network', False, 'should we use fake network devices and addresses') DEFINE_string('rabbit_host', 'localhost', 'rabbit host') DEFINE_integer('rabbit_port', 5672, 'rabbit port') +DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') diff --git a/nova/image/__init__.py b/nova/image/__init__.py index e69de29bb..93d83df24 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -0,0 +1,93 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from urlparse import urlparse + +import nova +from nova import exception +from nova import utils +from nova import flags + +FLAGS = flags.FLAGS + + +GlanceClient = utils.import_class('glance.client.Client') + + +def _parse_image_ref(image_href): + """Parse an image href into composite parts. + + :param image_href: href of an image + :returns: a tuple of the form (image_id, host, port) + + """ + o = urlparse(image_href) + port = o.port or 80 + host = o.netloc.split(':', 1)[0] + image_id = int(o.path.split('/')[-1]) + return (image_id, host, port) + + +def get_default_image_service(): + ImageService = utils.import_class(FLAGS.image_service) + return ImageService() + + +def get_glance_client(image_href): + """Get the correct glance client and id for the given image_href. + + The image_href param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_href is an int, then flags are used to create the default + glance client. 
+ + :param image_href: image ref/id for an image + :returns: a tuple of the form (glance_client, image_id) + + """ + image_href = image_href or 0 + if str(image_href).isdigit(): + glance_client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) + return (glance_client, int(image_href)) + + try: + (image_id, host, port) = _parse_image_ref(image_href) + except: + raise exception.InvalidImageRef(image_href=image_href) + glance_client = GlanceClient(host, port) + return (glance_client, image_id) + + +def get_image_service(image_href): + """Get the proper image_service and id for the given image_href. + + The image_href param can be an href of the form + http://myglanceserver:9292/images/42, or just an int such as 42. If the + image_href is an int, then the default image service is returned. + + :param image_href: image ref/id for an image + :returns: a tuple of the form (image_service, image_id) + + """ + image_href = image_href or 0 + if str(image_href).isdigit(): + return (get_default_image_service(), int(image_href)) + + (glance_client, image_id) = get_glance_client(image_href) + image_service = nova.image.glance.GlanceImageService(glance_client) + return (image_service, image_id) diff --git a/nova/image/fake.py b/nova/image/fake.py index b400b2adb..63966244b 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -19,6 +19,7 @@ import copy import datetime +import random from nova import exception from nova import flags @@ -32,7 +33,7 @@ LOG = logging.getLogger('nova.image.fake') FLAGS = flags.FLAGS -class FakeImageService(service.BaseImageService): +class _FakeImageService(service.BaseImageService): """Mock (fake) image service for unit testing.""" def __init__(self): @@ -40,7 +41,48 @@ class FakeImageService(service.BaseImageService): # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. 
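
As a standalone illustration of the href handling added in nova/image/__init__.py above, the sketch below mirrors the parsing logic outside of Nova; the helper name, default host and sample URL are illustrative only:

    from urlparse import urlparse


    def parse_image_ref(image_href, default_host='127.0.0.1', default_port=9292):
        """Return (image_id, host, port) for an href, falling back to the
        defaults when only a bare integer id is supplied, as in
        get_glance_client() above."""
        if str(image_href).isdigit():
            return int(image_href), default_host, default_port
        o = urlparse(str(image_href))
        port = o.port or 80
        host = o.netloc.split(':', 1)[0]
        image_id = int(o.path.split('/')[-1])
        return image_id, host, port


    # parse_image_ref('http://myglanceserver:9292/images/42') -> (42, 'myglanceserver', 9292)
    # parse_image_ref(42) -> (42, '127.0.0.1', 9292)
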
timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03) - image = {'id': '123456', + image1 = {'id': '123456', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel, + 'architecture': 'x86_64'}} + + image2 = {'id': 'fake', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + + image3 = {'id': '2', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + + image4 = {'id': '1', + 'name': 'fakeimage123456', + 'created_at': timestamp, + 'updated_at': timestamp, + 'status': 'active', + 'container_format': 'ami', + 'disk_format': 'raw', + 'properties': {'kernel_id': FLAGS.null_kernel, + 'ramdisk_id': FLAGS.null_kernel}} + + image5 = {'id': '3', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, @@ -49,14 +91,19 @@ class FakeImageService(service.BaseImageService): 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} - self.create(None, image) - super(FakeImageService, self).__init__() - def index(self, context): + self.create(None, image1) + self.create(None, image2) + self.create(None, image3) + self.create(None, image4) + self.create(None, image5) + super(_FakeImageService, self).__init__() + + def index(self, context, filters=None): """Returns list of images.""" return copy.deepcopy(self.images.values()) - def detail(self, context): + def detail(self, context, filters=None): """Return list of detailed image information.""" return copy.deepcopy(self.images.values()) @@ -66,36 +113,41 @@ class FakeImageService(service.BaseImageService): Returns a dict containing image data for the given opaque image id. """ - image_id = int(image_id) - image = self.images.get(image_id) + image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warn('Unable to find image id %s. Have images: %s', image_id, self.images) raise exception.ImageNotFound(image_id=image_id) - def create(self, context, data): + def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ - image_id = int(data['id']) + try: + image_id = metadata['id'] + except KeyError: + image_id = random.randint(0, 2 ** 31 - 1) + image_id = str(image_id) + if self.images.get(image_id): raise exception.Duplicate() - self.images[image_id] = copy.deepcopy(data) + metadata['id'] = image_id + self.images[image_id] = copy.deepcopy(metadata) + return self.images[image_id] - def update(self, context, image_id, data): + def update(self, context, image_id, metadata, data=None): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. """ - image_id = int(image_id) if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) - self.images[image_id] = copy.deepcopy(data) + self.images[image_id] = copy.deepcopy(metadata) def delete(self, context, image_id): """Delete the given image. 
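
The fake image service now keys images by string ids and invents a random id when the caller omits one. A minimal dict-backed version of that create() pattern, for reference (the class and names below are illustrative, not part of the diff):

    import copy
    import random


    class _TinyImageStore(object):
        def __init__(self):
            self.images = {}

        def create(self, metadata):
            # Use the caller-supplied id if present, otherwise pick a random
            # 31-bit integer, and always store the key as a string.
            try:
                image_id = metadata['id']
            except KeyError:
                image_id = random.randint(0, 2 ** 31 - 1)
            image_id = str(image_id)
            if image_id in self.images:
                raise ValueError('duplicate image id %s' % image_id)
            metadata['id'] = image_id
            self.images[image_id] = copy.deepcopy(metadata)
            return self.images[image_id]


    store = _TinyImageStore()
    print store.create({'name': 'fakeimage'})['id']      # random string id
    print store.create({'id': 123, 'name': 'x'})['id']   # '123'
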
@@ -103,7 +155,6 @@ class FakeImageService(service.BaseImageService): :raises: ImageNotFound if the image does not exist. """ - image_id = int(image_id) removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) @@ -111,3 +162,9 @@ class FakeImageService(service.BaseImageService): def delete_all(self): """Clears out all images.""" self.images.clear() + +_fakeImageService = _FakeImageService() + + +def FakeImageService(): + return _fakeImageService diff --git a/nova/image/glance.py b/nova/image/glance.py index 193e37273..dec797619 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -58,23 +58,23 @@ class GlanceImageService(service.BaseImageService): else: self.client = client - def index(self, context): + def index(self, context, filters=None): """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): meta_subset = utils.subset_dict(image_meta, ('id', 'name')) filtered.append(meta_subset) return filtered - def detail(self, context): + def detail(self, context, filters=None): """Calls out to Glance for a list of detailed image information.""" filtered = [] - image_metas = self.client.get_images_detailed() + image_metas = self.client.get_images_detailed(filters=filters) for image_meta in image_metas: if self._is_image_available(context, image_meta): base_image_meta = self._translate_to_base(image_meta) diff --git a/nova/image/local.py b/nova/image/local.py index 918180bae..677d5302b 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -63,7 +63,7 @@ class LocalImageService(service.BaseImageService): images.append(unhexed_image_id) return images - def index(self, context): + def index(self, context, *args, **kwargs): filtered = [] image_metas = self.detail(context) for image_meta in image_metas: @@ -71,7 +71,7 @@ class LocalImageService(service.BaseImageService): filtered.append(meta) return filtered - def detail(self, context): + def detail(self, context, *args, **kwargs): images = [] for image_id in self._ids(): try: diff --git a/nova/image/s3.py b/nova/image/s3.py index c38c58d95..9e95bd698 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -31,12 +31,15 @@ import eventlet from nova import crypto from nova import exception from nova import flags +from nova import image +from nova import log as logging from nova import utils from nova.auth import manager from nova.image import service from nova.api.ec2 import ec2utils +LOG = logging.getLogger("nova.image.s3") FLAGS = flags.FLAGS flags.DEFINE_string('image_decryption_dir', '/tmp', 'parent dir for tempdir used for image decryption') @@ -46,9 +49,7 @@ class S3ImageService(service.BaseImageService): """Wraps an existing image service to support s3 based register.""" def __init__(self, service=None, *args, **kwargs): - if service is None: - service = utils.import_object(FLAGS.image_service) - self.service = service + self.service = service or image.get_default_image_service() self.service.__init__(*args, **kwargs) def create(self, context, metadata, data=None): @@ -161,43 +162,83 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part 
files.""" - parts = [] - for fn_element in manifest.find('image').getiterator('filename'): - part = self._download_file(bucket, fn_element.text, image_path) - parts.append(part) - - # NOTE(vish): this may be suboptimal, should we use cat? - encrypted_filename = os.path.join(image_path, 'image.encrypted') - with open(encrypted_filename, 'w') as combined: - for filename in parts: - with open(filename) as part: - shutil.copyfileobj(part, combined) - - metadata['properties']['image_state'] = 'decrypting' + metadata['properties']['image_state'] = 'downloading' self.service.update(context, image_id, metadata) - hex_key = manifest.find('image/ec2_encrypted_key').text - encrypted_key = binascii.a2b_hex(hex_key) - hex_iv = manifest.find('image/ec2_encrypted_iv').text - encrypted_iv = binascii.a2b_hex(hex_iv) + try: + parts = [] + elements = manifest.find('image').getiterator('filename') + for fn_element in elements: + part = self._download_file(bucket, + fn_element.text, + image_path) + parts.append(part) + + # NOTE(vish): this may be suboptimal, should we use cat? + enc_filename = os.path.join(image_path, 'image.encrypted') + with open(enc_filename, 'w') as combined: + for filename in parts: + with open(filename) as part: + shutil.copyfileobj(part, combined) + + except Exception: + LOG.error(_("Failed to download %(image_location)s " + "to %(image_path)s"), locals()) + metadata['properties']['image_state'] = 'failed_download' + self.service.update(context, image_id, metadata) + raise - # FIXME(vish): grab key from common service so this can run on - # any host. - cloud_pk = crypto.key_path(context.project_id) + metadata['properties']['image_state'] = 'decrypting' + self.service.update(context, image_id, metadata) - decrypted_filename = os.path.join(image_path, 'image.tar.gz') - self._decrypt_image(encrypted_filename, encrypted_key, - encrypted_iv, cloud_pk, decrypted_filename) + try: + hex_key = manifest.find('image/ec2_encrypted_key').text + encrypted_key = binascii.a2b_hex(hex_key) + hex_iv = manifest.find('image/ec2_encrypted_iv').text + encrypted_iv = binascii.a2b_hex(hex_iv) + + # FIXME(vish): grab key from common service so this can run on + # any host. 
+ cloud_pk = crypto.key_path(context.project_id) + + dec_filename = os.path.join(image_path, 'image.tar.gz') + self._decrypt_image(enc_filename, encrypted_key, + encrypted_iv, cloud_pk, + dec_filename) + except Exception: + LOG.error(_("Failed to decrypt %(image_location)s " + "to %(image_path)s"), locals()) + metadata['properties']['image_state'] = 'failed_decrypt' + self.service.update(context, image_id, metadata) + raise metadata['properties']['image_state'] = 'untarring' self.service.update(context, image_id, metadata) - unz_filename = self._untarzip_image(image_path, decrypted_filename) + try: + unz_filename = self._untarzip_image(image_path, dec_filename) + except Exception: + LOG.error(_("Failed to untar %(image_location)s " + "to %(image_path)s"), locals()) + metadata['properties']['image_state'] = 'failed_untar' + self.service.update(context, image_id, metadata) + raise metadata['properties']['image_state'] = 'uploading' - with open(unz_filename) as image_file: - self.service.update(context, image_id, metadata, image_file) + self.service.update(context, image_id, metadata) + try: + with open(unz_filename) as image_file: + self.service.update(context, image_id, + metadata, image_file) + except Exception: + LOG.error(_("Failed to upload %(image_location)s " + "to %(image_path)s"), locals()) + metadata['properties']['image_state'] = 'failed_upload' + self.service.update(context, image_id, metadata) + raise + metadata['properties']['image_state'] = 'available' + metadata['status'] = 'active' self.service.update(context, image_id, metadata) shutil.rmtree(image_path) diff --git a/nova/image/service.py b/nova/image/service.py index ab6749049..5361cfc89 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -46,7 +46,7 @@ class BaseImageService(object): # the ImageService subclass SERVICE_IMAGE_ATTRS = [] - def index(self, context): + def index(self, context, *args, **kwargs): """List images. :returns: a sequence of mappings with the following signature @@ -55,7 +55,7 @@ class BaseImageService(object): """ raise NotImplementedError - def detail(self, context): + def detail(self, context, *args, **kwargs): """Detailed information about an images. 
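
The reworked delayed_create() above walks the image through downloading, decrypting, untarring and uploading, and records a failed_* state whenever a step raises. That pattern can be sketched generically as follows; the helper and the stand-in callables are hypothetical, not part of the diff:

    def run_phase(phase, work, update_state):
        """Record <phase>, run its work, and record failed_<phase> on error."""
        update_state(phase)
        try:
            return work()
        except Exception:
            update_state('failed_' + phase)
            raise


    states = []
    run_phase('downloading', lambda: 'parts', states.append)
    try:
        run_phase('decrypting', lambda: 1 / 0, states.append)
    except ZeroDivisionError:
        pass
    print states   # ['downloading', 'decrypting', 'failed_decrypting']
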
:returns: a sequence of mappings with the following signature diff --git a/nova/log.py b/nova/log.py index 096279f7c..6909916a1 100644 --- a/nova/log.py +++ b/nova/log.py @@ -35,6 +35,7 @@ import os import sys import traceback +import nova from nova import flags from nova import version @@ -63,6 +64,7 @@ flags.DEFINE_list('default_log_levels', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_bool('publish_errors', False, 'publish error events') flags.DEFINE_string('logfile', None, 'output to named file') @@ -258,12 +260,20 @@ class NovaRootLogger(NovaLogger): else: self.removeHandler(self.filelog) self.addHandler(self.streamlog) + if FLAGS.publish_errors: + self.addHandler(PublishErrorsHandler(ERROR)) if FLAGS.verbose: self.setLevel(DEBUG) else: self.setLevel(INFO) +class PublishErrorsHandler(logging.Handler): + def emit(self, record): + nova.notifier.api.notify('nova.error.publisher', 'error_notification', + nova.notifier.api.ERROR, dict(error=record.msg)) + + def handle_exception(type, value, tb): extra = {} if FLAGS.verbose: diff --git a/nova/network/manager.py b/nova/network/manager.py index 5a6fdde5a..f726c4b26 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -235,7 +235,7 @@ class NetworkManager(manager.SchedulerDependentManager): inst_addr = instance_ref['mac_address'] raise exception.Error(_('IP %(address)s leased to bad mac' ' %(inst_addr)s vs %(mac)s') % locals()) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, diff --git a/nova/notifier/api.py b/nova/notifier/api.py index a3e7a039e..d49517c8b 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -11,9 +11,8 @@ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations -# under the License.import datetime +# under the License. -import datetime import uuid from nova import flags @@ -64,7 +63,7 @@ def notify(publisher_id, event_type, priority, payload): {'message_id': str(uuid.uuid4()), 'publisher_id': 'compute.host1', - 'timestamp': datetime.datetime.utcnow(), + 'timestamp': utils.utcnow(), 'priority': 'WARN', 'event_type': 'compute.create_instance', 'payload': {'instance_id': 12, ... }} @@ -79,5 +78,5 @@ def notify(publisher_id, event_type, priority, payload): event_type=event_type, priority=priority, payload=payload, - timestamp=str(datetime.datetime.utcnow())) + timestamp=str(utils.utcnow())) driver.notify(msg) diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py index dd6327c8f..76025a1e3 100644 --- a/nova/objectstore/s3server.py +++ b/nova/objectstore/s3server.py @@ -81,7 +81,7 @@ class S3Application(wsgi.Router): super(S3Application, self).__init__(mapper) -class BaseRequestHandler(wsgi.Controller): +class BaseRequestHandler(object): """Base class emulating Tornado's web framework pattern in WSGI. 
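
The new --publish_errors flag above attaches a PublishErrorsHandler to the root logger so ERROR records get forwarded to the notifier. A stripped-down, stdlib-only version of that handler pattern, with a stand-in notify() instead of nova.notifier.api:

    import logging


    def fake_notify(publisher_id, event_type, priority, payload):
        print priority, payload


    class PublishErrorsHandler(logging.Handler):
        """Forward every record this handler sees to a notify() callable."""
        def emit(self, record):
            fake_notify('error.publisher', 'error_notification',
                        'ERROR', dict(error=record.msg))


    log = logging.getLogger('example')
    log.addHandler(PublishErrorsHandler(logging.ERROR))
    log.error('something broke')   # prints: ERROR {'error': 'something broke'}
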
This is a direct port of Tornado's implementation, so some key decisions diff --git a/nova/rpc.py b/nova/rpc.py index c5277c6a9..2e78a31e7 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -65,6 +65,7 @@ class Connection(carrot_connection.BrokerConnection): if new or not hasattr(cls, '_instance'): params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, + ssl=FLAGS.rabbit_use_ssl, userid=FLAGS.rabbit_userid, password=FLAGS.rabbit_password, virtual_host=FLAGS.rabbit_virtual_host) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 2094e3565..0b257c5d8 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -28,6 +28,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import rpc +from nova import utils from nova.compute import power_state FLAGS = flags.FLAGS @@ -61,7 +62,7 @@ class Scheduler(object): """Check whether a service is up based on last heartbeat.""" last_heartbeat = service['updated_at'] or service['created_at'] # Timestamps in DB are UTC. - elapsed = datetime.datetime.utcnow() - last_heartbeat + elapsed = utils.utcnow() - last_heartbeat return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time) def hosts_up(self, context, topic): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index 483f3225c..bd6b26608 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -14,8 +14,8 @@ # under the License. """ -Host Filter is a driver mechanism for requesting instance resources. -Three drivers are included: AllHosts, Flavor & JSON. AllHosts just +Host Filter is a mechanism for requesting instance resources. +Three filters are included: AllHosts, Flavor & JSON. AllHosts just returns the full, unfiltered list of hosts. Flavor is a hard coded matching mechanism based on flavor criteria and JSON is an ad-hoc filter grammar. @@ -41,18 +41,20 @@ import json from nova import exception from nova import flags from nova import log as logging +from nova.scheduler import zone_aware_scheduler from nova import utils +from nova.scheduler import zone_aware_scheduler LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter_driver', +flags.DEFINE_string('default_host_filter', 'nova.scheduler.host_filter.AllHostsFilter', - 'Which driver to use for filtering hosts.') + 'Which filter to use for filtering hosts.') class HostFilter(object): - """Base class for host filter drivers.""" + """Base class for host filters.""" def instance_type_to_filter(self, instance_type): """Convert instance_type into a filter for most common use-case.""" @@ -63,14 +65,15 @@ class HostFilter(object): raise NotImplementedError() def _full_name(self): - """module.classname of the filter driver""" + """module.classname of the filter.""" return "%s.%s" % (self.__module__, self.__class__.__name__) class AllHostsFilter(HostFilter): - """NOP host filter driver. Returns all hosts in ZoneManager. + """ NOP host filter. Returns all hosts in ZoneManager. This essentially does what the old Scheduler+Chance used - to give us.""" + to give us. 
+ """ def instance_type_to_filter(self, instance_type): """Return anything to prevent base-class from raising @@ -83,8 +86,8 @@ class AllHostsFilter(HostFilter): for host, services in zone_manager.service_states.iteritems()] -class FlavorFilter(HostFilter): - """HostFilter driver hard-coded to work with flavors.""" +class InstanceTypeFilter(HostFilter): + """HostFilter hard-coded to work with InstanceType records.""" def instance_type_to_filter(self, instance_type): """Use instance_type to filter hosts.""" @@ -98,9 +101,10 @@ class FlavorFilter(HostFilter): capabilities = services.get('compute', {}) host_ram_mb = capabilities['host_memory_free'] disk_bytes = capabilities['disk_available'] - if host_ram_mb >= instance_type['memory_mb'] and \ - disk_bytes >= instance_type['local_gb']: - selected_hosts.append((host, capabilities)) + spec_ram = instance_type['memory_mb'] + spec_disk = instance_type['local_gb'] + if host_ram_mb >= spec_ram and disk_bytes >= spec_disk: + selected_hosts.append((host, capabilities)) return selected_hosts #host entries (currently) are like: @@ -109,15 +113,15 @@ class FlavorFilter(HostFilter): # 'host_memory_total': 8244539392, # 'host_memory_overhead': 184225792, # 'host_memory_free': 3868327936, -# 'host_memory_free_computed': 3840843776}, -# 'host_other-config': {}, +# 'host_memory_free_computed': 3840843776, +# 'host_other_config': {}, # 'host_ip_address': '192.168.1.109', # 'host_cpu_info': {}, # 'disk_available': 32954957824, # 'disk_total': 50394562560, -# 'disk_used': 17439604736}, +# 'disk_used': 17439604736, # 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', -# 'host_name-label': 'xs-mini'} +# 'host_name_label': 'xs-mini'} # instance_type table has: #name = Column(String(255), unique=True) @@ -131,8 +135,9 @@ class FlavorFilter(HostFilter): class JsonFilter(HostFilter): - """Host Filter driver to allow simple JSON-based grammar for - selecting hosts.""" + """Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ def _equals(self, args): """First term is == all the other terms.""" @@ -222,13 +227,14 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk] + ['>=', '$compute.disk_available', required_disk], ] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]'""" + form '$service.capability[.subcap*]' + """ if not string: return None if string[0] != '$': @@ -271,18 +277,48 @@ class JsonFilter(HostFilter): return hosts -DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter] +FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] -def choose_driver(driver_name=None): - """Since the caller may specify which driver to use we need - to have an authoritative list of what is permissible. This - function checks the driver name against a predefined set - of acceptable drivers.""" +def choose_host_filter(filter_name=None): + """Since the caller may specify which filter to use we need + to have an authoritative list of what is permissible. This + function checks the filter name against a predefined set + of acceptable filters. 
+ """ - if not driver_name: - driver_name = FLAGS.default_host_filter_driver - for driver in DRIVERS: - if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: - return driver() - raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) + if not filter_name: + filter_name = FLAGS.default_host_filter + for filter_class in FILTERS: + host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) + if host_match == filter_name: + return filter_class() + raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) + + +class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): + """The HostFilterScheduler uses the HostFilter to filter + hosts for weighing. The particular filter used may be passed in + as an argument or the default will be used. + + request_spec = {'filter': <Filter name>, + 'instance_type': <InstanceType dict>} + """ + + def filter_hosts(self, num, request_spec): + """Filter the full host list (from the ZoneManager)""" + filter_name = request_spec.get('filter', None) + host_filter = choose_host_filter(filter_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. + instance_type = request_spec['instance_type'] + name, query = host_filter.instance_type_to_filter(instance_type) + return host_filter.filter_hosts(self.zone_manager, query) + + def weigh_hosts(self, num, request_spec, hosts): + """Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format. + """ + return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py new file mode 100644 index 000000000..629fe2e42 --- /dev/null +++ b/nova/scheduler/least_cost.py @@ -0,0 +1,156 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Least Cost Scheduler is a mechanism for choosing which host machines to +provision a set of resources to. The input of the least-cost-scheduler is a +set of objective-functions, called the 'cost-functions', a weight for each +cost-function, and a list of candidate hosts (gathered via FilterHosts). + +The cost-function and weights are tabulated, and the host with the least cost +is then selected for provisioning. 
+""" + +import collections + +from nova import flags +from nova import log as logging +from nova.scheduler import zone_aware_scheduler +from nova import utils + +LOG = logging.getLogger('nova.scheduler.least_cost') + +FLAGS = flags.FLAGS +flags.DEFINE_list('least_cost_scheduler_cost_functions', + ['nova.scheduler.least_cost.noop_cost_fn'], + 'Which cost functions the LeastCostScheduler should use.') + + +# TODO(sirp): Once we have enough of these rules, we can break them out into a +# cost_functions.py file (perhaps in a least_cost_scheduler directory) +flags.DEFINE_integer('noop_cost_fn_weight', 1, + 'How much weight to give the noop cost function') + + +def noop_cost_fn(host): + """Return a pre-weight cost of 1 for each host""" + return 1 + + +flags.DEFINE_integer('fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') + + +def fill_first_cost_fn(host): + """Prefer hosts that have less ram available, filter_hosts will exclude + hosts that don't have enough ram""" + hostname, caps = host + free_mem = caps['compute']['host_memory_free'] + return free_mem + + +class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def get_cost_fns(self): + """Returns a list of tuples containing weights and cost functions to + use for weighing hosts + """ + cost_fns = [] + for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: + + try: + # NOTE(sirp): import_class is somewhat misnamed since it can + # any callable from a module + cost_fn = utils.import_class(cost_fn_str) + except exception.ClassNotFound: + raise exception.SchedulerCostFunctionNotFound( + cost_fn_str=cost_fn_str) + + try: + weight = getattr(FLAGS, "%s_weight" % cost_fn.__name__) + except AttributeError: + raise exception.SchedulerWeightFlagNotFound( + flag_name=flag_name) + + cost_fns.append((weight, cost_fn)) + + return cost_fns + + def weigh_hosts(self, num, request_spec, hosts): + """Returns a list of dictionaries of form: + [ {weight: weight, hostname: hostname} ]""" + + # FIXME(sirp): weigh_hosts should handle more than just instances + hostnames = [hostname for hostname, caps in hosts] + + cost_fns = self.get_cost_fns() + costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) + + weighted = [] + weight_log = [] + for cost, hostname in zip(costs, hostnames): + weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) + weight_dict = dict(weight=cost, hostname=hostname) + weighted.append(weight_dict) + + LOG.debug(_("Weighted Costs => %s") % weight_log) + return weighted + + +def normalize_list(L): + """Normalize an array of numbers such that each element satisfies: + 0 <= e <= 1""" + if not L: + return L + max_ = max(L) + if max_ > 0: + return [(float(e) / max_) for e in L] + return L + + +def weighted_sum(domain, weighted_fns, normalize=True): + """Use the weighted-sum method to compute a score for an array of objects. + Normalize the results of the objective-functions so that the weights are + meaningful regardless of objective-function's range. + + domain - input to be scored + weighted_fns - list of weights and functions like: + [(weight, objective-functions)] + + Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts) + """ + # Table of form: + # { domain1: [score1, score2, ..., scoreM] + # ... 
+ # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + elem = domain[idx] + domain_scores.append(elem_score) + + return domain_scores diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 55cd7208b..bd40e73c0 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -83,11 +83,16 @@ class SchedulerManager(manager.Manager): except AttributeError: host = self.driver.schedule(elevated, topic, *args, **kwargs) + if not host: + LOG.debug(_("%(topic)s %(method)s handled in Scheduler") + % locals()) + return + rpc.cast(context, db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals()) + LOG.debug(_("Casted to %(topic)s %(host)s for %(method)s") % locals()) # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin. # Based on bexar design summit discussion, diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index dd568d2c6..87cdef11d 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -21,10 +21,9 @@ Simple Scheduler """ -import datetime - from nova import db from nova import flags +from nova import utils from nova.scheduler import driver from nova.scheduler import chance @@ -54,7 +53,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': host, 'scheduled_at': now}) return host @@ -66,7 +65,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.instance_update(context, instance_id, {'host': service['host'], @@ -90,7 +89,7 @@ class SimpleScheduler(chance.ChanceScheduler): # TODO(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': host, 'scheduled_at': now}) return host @@ -103,7 +102,7 @@ class SimpleScheduler(chance.ChanceScheduler): if self.service_is_up(service): # NOTE(vish): this probably belongs in the manager, if we # can generalize this somehow - now = datetime.datetime.utcnow() + now = utils.utcnow() db.volume_update(context, volume_id, {'host': service['host'], diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index b3d230bd2..df84cf7bd 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -22,7 +22,9 @@ across zones. There are two expansion points to this class for: import operator +from nova import db from nova import log as logging +from nova import rpc from nova.scheduler import api from nova.scheduler import driver @@ -36,64 +38,94 @@ class ZoneAwareScheduler(driver.Scheduler): """Call novaclient zone method. 
Broken out for testing.""" return api.call_zone_method(context, method, specs=specs) - def schedule_run_instance(self, context, topic='compute', specs={}, - *args, **kwargs): + def schedule_run_instance(self, context, instance_id, request_spec, + *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: 1. Create a Build Plan and then provision, or 2. Use the Build Plan information in the request parameters to simply create the instance (either in this zone or - a child zone).""" + a child zone). + """ - if 'blob' in specs: - return self.provision_instance(context, topic, specs) + # TODO(sandy): We'll have to look for richer specs at some point. - # Create build plan and provision ... - build_plan = self.select(context, specs) - for item in build_plan: - self.provision_instance(context, topic, item) + if 'blob' in request_spec: + self.provision_resource(context, request_spec, instance_id, kwargs) + return None - def provision_instance(context, topic, item): - """Create the requested instance in this Zone or a child zone.""" - pass + # Create build plan and provision ... + build_plan = self.select(context, request_spec) + if not build_plan: + raise driver.NoValidHost(_('No hosts were available')) - def select(self, context, *args, **kwargs): + for item in build_plan: + self.provision_resource(context, item, instance_id, kwargs) + + # Returning None short-circuits the routing to Compute (since + # we've already done it here) + return None + + def provision_resource(self, context, item, instance_id, kwargs): + """Create the requested resource in this Zone or a child zone.""" + if "hostname" in item: + host = item['hostname'] + kwargs['instance_id'] = instance_id + rpc.cast(context, + db.queue_get_for(context, "compute", host), + {"method": "run_instance", + "args": kwargs}) + LOG.debug(_("Casted to compute %(host)s for run_instance") + % locals()) + else: + # TODO(sandy) Provision in child zone ... + LOG.warning(_("Provision to Child Zone not supported (yet)")) + pass + + def select(self, context, request_spec, *args, **kwargs): """Select returns a list of weights and zone/host information corresponding to the best hosts to service the request. Any child zone information has been encrypted so as not to reveal - anything about the children.""" - return self._schedule(context, "compute", *args, **kwargs) + anything about the children. + """ + return self._schedule(context, "compute", request_spec, + *args, **kwargs) - def schedule(self, context, topic, *args, **kwargs): + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. """ - res = self._schedule(context, topic, *args, **kwargs) - # TODO(sirp): should this be a host object rather than a weight-dict? - if not res: - raise driver.NoValidHost(_('No hosts were available')) - return res[0] + raise driver.NoValidHost(_('No hosts were available')) - def _schedule(self, context, topic, *args, **kwargs): + def _schedule(self, context, topic, request_spec, *args, **kwargs): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ - #TODO(sandy): extract these from args. 
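
The weighting merge in _schedule() above boils down to concatenating the local weightings with whatever the child zones return and sorting on 'weight'. A toy version, with all data invented and the child-zone entry shape only approximated:

    import operator

    local = [dict(weight=2, hostname='hostA'),
             dict(weight=5, hostname='hostB')]

    # What a child zone's select() call might hand back; 'blob' stands in
    # for the opaque, encrypted build-plan entry described above.
    child = [dict(weight=1, blob='encrypted-build-plan', child_zone='zone2')]

    weighted = local + child
    weighted.sort(key=operator.itemgetter('weight'))
    print [w.get('hostname', w.get('child_zone')) for w in weighted]
    # ['zone2', 'hostA', 'hostB']
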
+ if topic != "compute": + raise NotImplemented(_("Zone Aware Scheduler only understands " + "Compute nodes (for now)")) + + #TODO(sandy): how to infer this from OS API params? num_instances = 1 - specs = {} # Filter local hosts based on requirements ... - host_list = self.filter_hosts(num_instances, specs) + host_list = self.filter_hosts(num_instances, request_spec) + + # TODO(sirp): weigh_hosts should also be a function of 'topic' or + # resources, so that we can apply different objective functions to it # then weigh the selected hosts. # weighted = [{weight=weight, name=hostname}, ...] - weighted = self.weigh_hosts(num_instances, specs, host_list) + weighted = self.weigh_hosts(num_instances, request_spec, host_list) # Next, tack on the best weights from the child zones ... child_results = self._call_zone_method(context, "select", - specs=specs) + specs=request_spec) for child_zone, result in child_results: for weighting in result: # Remember the child_zone so we can get back to @@ -108,12 +140,18 @@ class ZoneAwareScheduler(driver.Scheduler): weighted.sort(key=operator.itemgetter('weight')) return weighted - def filter_hosts(self, num, specs): + def filter_hosts(self, num, request_spec): """Derived classes must override this method and return - a list of hosts in [(hostname, capability_dict)] format.""" - raise NotImplemented() - - def weigh_hosts(self, num, specs, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format.""" - raise NotImplemented() + a list of hosts in [(hostname, capability_dict)] format. + """ + # NOTE(sirp): The default logic is the equivalent to AllHostsFilter + service_states = self.zone_manager.service_states + return [(host, services) + for host, services in service_states.iteritems()] + + def weigh_hosts(self, num, request_spec, hosts): + """Derived classes may override this to provide more sophisticated + scheduling objectives + """ + # NOTE(sirp): The default logic is the same as the NoopCostFunction + return [dict(weight=1, hostname=host) for host, caps in hosts] diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 3ddf6f3c3..3f483adff 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -17,16 +17,17 @@ ZoneManager oversees all communications with child Zones. """ +import datetime import novaclient import thread import traceback -from datetime import datetime from eventlet import greenpool from nova import db from nova import flags from nova import log as logging +from nova import utils FLAGS = flags.FLAGS flags.DEFINE_integer('zone_db_check_interval', 60, @@ -42,7 +43,7 @@ class ZoneState(object): self.name = None self.capabilities = None self.attempt = 0 - self.last_seen = datetime.min + self.last_seen = datetime.datetime.min self.last_exception = None self.last_exception_time = None @@ -56,7 +57,7 @@ class ZoneState(object): def update_metadata(self, zone_metadata): """Update zone metadata after successful communications with child zone.""" - self.last_seen = datetime.now() + self.last_seen = utils.utcnow() self.attempt = 0 self.name = zone_metadata.get("name", "n/a") self.capabilities = ", ".join(["%s=%s" % (k, v) @@ -72,7 +73,7 @@ class ZoneState(object): """Something went wrong. 
Check to see if zone should be marked as offline.""" self.last_exception = exception - self.last_exception_time = datetime.now() + self.last_exception_time = utils.utcnow() api_url = self.api_url logging.warning(_("'%(exception)s' error talking to " "zone %(api_url)s") % locals()) @@ -104,7 +105,7 @@ def _poll_zone(zone): class ZoneManager(object): """Keeps the zone states updated.""" def __init__(self): - self.last_zone_db_check = datetime.min + self.last_zone_db_check = datetime.datetime.min self.zone_states = {} # { <zone_id> : ZoneState } self.service_states = {} # { <host> : { <service> : { cap k : v }}} self.green_pool = greenpool.GreenPool() @@ -158,10 +159,10 @@ class ZoneManager(object): def ping(self, context=None): """Ping should be called periodically to update zone status.""" - diff = datetime.now() - self.last_zone_db_check + diff = utils.utcnow() - self.last_zone_db_check if diff.seconds >= FLAGS.zone_db_check_interval: logging.debug(_("Updating zone cache from db.")) - self.last_zone_db_check = datetime.now() + self.last_zone_db_check = utils.utcnow() self._refresh_from_db(context) self._poll_zones(context) diff --git a/nova/test.py b/nova/test.py index 80b2d0a74..4a0a18fe7 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,7 +23,6 @@ inline callbacks. """ -import datetime import functools import os import shutil @@ -37,6 +36,7 @@ from eventlet import greenthread from nova import fakerabbit from nova import flags from nova import rpc +from nova import utils from nova import service from nova import wsgi from nova.virt import fake @@ -69,7 +69,7 @@ class TestCase(unittest.TestCase): # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system # to work properly. - self.start = datetime.datetime.utcnow() + self.start = utils.utcnow() shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db), os.path.join(FLAGS.state_path, FLAGS.sqlite_db)) @@ -184,7 +184,7 @@ class TestCase(unittest.TestCase): wsgi.Server.start = _wrapped_start # Useful assertions - def assertDictMatch(self, d1, d2): + def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. 
This is a 'deep' match in the sense that it handles nested @@ -215,15 +215,26 @@ class TestCase(unittest.TestCase): for key in d1keys: d1value = d1[key] d2value = d2[key] + try: + error = abs(float(d1value) - float(d2value)) + within_tolerance = error <= tolerance + except (ValueError, TypeError): + # If both values aren't convertable to float, just ignore + # ValueError if arg is a str, TypeError if it's something else + # (like None) + within_tolerance = False + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): self.assertDictMatch(d1value, d2value) elif 'DONTCARE' in (d1value, d2value): continue + elif approx_equal and within_tolerance: + continue elif d1value != d2value: raise_assertion("d1['%(key)s']=%(d1value)s != " "d2['%(key)s']=%(d2value)s" % locals()) - def assertDictListMatch(self, L1, L2): + def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001): """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) @@ -239,4 +250,5 @@ class TestCase(unittest.TestCase): 'len(L2)=%(L2count)d' % locals()) for d1, d2 in zip(L1, L2): - self.assertDictMatch(d1, d2) + self.assertDictMatch(d1, d2, approx_equal=approx_equal, + tolerance=tolerance) diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index dbdd0928a..03aad007a 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -17,12 +17,10 @@ import json -from nova import wsgi - from nova.api.openstack import extensions -class FoxInSocksController(wsgi.Controller): +class FoxInSocksController(object): def index(self, req): return "Try to say this Mr. Knox, sir..." diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index bf51239e6..62e44ba96 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -16,7 +16,6 @@ # under the License. 
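
The tolerance handling added to assertDictMatch above can be boiled down to this standalone check; the function name is illustrative:

    def approx_equal_values(v1, v2, tolerance=0.001):
        """True when both values convert to float and differ by <= tolerance."""
        try:
            return abs(float(v1) - float(v2)) <= tolerance
        except (ValueError, TypeError):
            # Mirrors the test helper: non-numeric values never match
            # approximately, they have to be exactly equal instead.
            return False


    print approx_equal_values(1.0, 1.0005)       # True
    print approx_equal_values('1.0', 1.0005)     # True, strings are coerced
    print approx_equal_values('DONTCARE', 1.0)   # False
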
import copy -import datetime import json import random import string @@ -38,6 +37,7 @@ from nova.api.openstack import auth from nova.api.openstack import versions from nova.api.openstack import limits from nova.auth.manager import User, Project +import nova.image.fake from nova.image import glance from nova.image import local from nova.image import service @@ -104,10 +104,12 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def stub_out_image_service(stubs): - def fake_image_show(meh, context, id): - return dict(kernelId=1, ramdiskId=1) - - stubs.Set(local.LocalImageService, 'show', fake_image_show) + def fake_get_image_service(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) + stubs.Set(nova.image, 'get_image_service', fake_get_image_service) + stubs.Set(nova.image, 'get_default_image_service', + lambda: nova.image.fake.FakeImageService()) def stub_out_auth(stubs): @@ -166,11 +168,11 @@ def stub_out_glance(stubs, initial_fixtures=None): def __init__(self, initial_fixtures): self.fixtures = initial_fixtures or [] - def fake_get_images(self): + def fake_get_images(self, filters=None): return [dict(id=f['id'], name=f['name']) for f in self.fixtures] - def fake_get_images_detailed(self): + def fake_get_images_detailed(self, filters=None): return copy.deepcopy(self.fixtures) def fake_get_image_meta(self, image_id): @@ -208,7 +210,7 @@ def stub_out_glance(stubs, initial_fixtures=None): def _find_image(self, image_id): for f in self.fixtures: - if f['id'] == image_id: + if str(f['id']) == str(image_id): return f return None @@ -253,7 +255,7 @@ class FakeAuthDatabase(object): @staticmethod def auth_token_create(context, token): - fake_token = FakeToken(created_at=datetime.datetime.now(), **token) + fake_token = FakeToken(created_at=utils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 544298602..60914c0a3 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -26,15 +26,15 @@ from nova import flags from nova.api import openstack from nova.api.openstack import extensions from nova.api.openstack import flavors +from nova.api.openstack import wsgi from nova.tests.api.openstack import fakes -import nova.wsgi FLAGS = flags.FLAGS response_body = "Try to say this Mr. Knox, sir..." 
-class StubController(nova.wsgi.Controller): +class StubController(object): def __init__(self, body): self.body = body diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 2c329f920..93b402081 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -22,12 +22,12 @@ and as a WSGI layer import copy import json -import datetime import os import shutil import tempfile import xml.dom.minidom as minidom +import mox import stubout import webob @@ -708,6 +708,146 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertDictListMatch(expected, response_list) + def test_image_filter_with_name(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'name': 'testname'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_status(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?property-test=3') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_filter_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.index(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.index(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_name(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'name': 'testname'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?name=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_status(self): + mocker = mox.Mox() + image_service = 
mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_with_property(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'property-test': '3'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?property-test=3') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_filter_not_supported(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {'status': 'ACTIVE'} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + + def test_image_detail_no_filters(self): + mocker = mox.Mox() + image_service = mocker.CreateMockAnything() + context = object() + filters = {} + image_service.detail(context, filters).AndReturn([]) + mocker.ReplayAll() + request = webob.Request.blank( + '/v1.1/images/detail') + request.environ['nova.context'] = context + controller = images.ControllerV11(image_service=image_service) + controller.detail(request) + mocker.VerifyAll() + def test_get_image_found(self): req = webob.Request.blank('/v1.0/images/123') res = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 70f59eda6..01613d1d8 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -73,7 +73,7 @@ class LimitsControllerV10Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.LimitsControllerV10() + self.controller = limits.create_resource('1.0') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -209,7 +209,7 @@ class LimitsControllerV11Test(BaseLimitTestSuite): def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.LimitsControllerV11() + self.controller = limits.create_resource('1.1') def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index fbde5c9ce..28ad4a417 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -16,7 +16,6 @@ # under the License. 
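
The new ControllerV11 filter tests above pin down the expected behaviour: name, status and property-* query parameters are forwarded to the image service, anything else is dropped, and no parameters means an empty filter dict. A sketch of a helper with that behaviour; the helper itself is inferred from the tests, not shown in this diff:

    SUPPORTED_FILTERS = ['name', 'status']


    def get_filters(params):
        """Reduce request GET params to the filters the image service accepts."""
        filters = {}
        for param, value in params.iteritems():
            if param in SUPPORTED_FILTERS or param.startswith('property-'):
                filters[param] = value
        return filters


    print get_filters({'name': 'testname'})                             # {'name': 'testname'}
    print get_filters({'status': 'ACTIVE', 'UNSUPPORTEDFILTER': 'x'})   # {'status': 'ACTIVE'}
    print get_filters({})                                               # {}
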
import base64 -import datetime import json import unittest from xml.dom import minidom @@ -29,6 +28,7 @@ from nova import db from nova import exception from nova import flags from nova import test +from nova import utils import nova.api.openstack from nova.api.openstack import servers import nova.compute.api @@ -37,6 +37,7 @@ from nova.compute import power_state import nova.db.api from nova.db.sqlalchemy.models import Instance from nova.db.sqlalchemy.models import InstanceMetadata +import nova.image.fake import nova.rpc from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes @@ -97,7 +98,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "admin_pass": "", "user_id": user_id, "project_id": "", - "image_id": "10", + "image_ref": "10", "kernel_id": "", "ramdisk_id": "", "launch_index": 0, @@ -114,9 +115,9 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "user_data": "", "reservation_id": "", "mac_address": "", - "scheduled_at": datetime.datetime.now(), - "launched_at": datetime.datetime.now(), - "terminated_at": datetime.datetime.now(), + "scheduled_at": utils.utcnow(), + "launched_at": utils.utcnow(), + "terminated_at": utils.utcnow(), "availability_zone": "", "display_name": "server%s" % id, "display_description": "", @@ -217,7 +218,6 @@ class ServersTest(test.TestCase): }, ] - print res_dict['server'] self.assertEqual(res_dict['server']['links'], expected_links) def test_get_server_by_id_with_addresses_xml(self): @@ -485,8 +485,6 @@ class ServersTest(test.TestCase): fake_method) self.stubs.Set(nova.api.openstack.servers.Controller, "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping) - self.stubs.Set(nova.api.openstack.common, - "get_image_id_from_image_hash", image_id_from_hash) self.stubs.Set(nova.compute.api.API, "_find_host", find_host) def _test_create_instance_helper(self): @@ -589,12 +587,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', @@ -616,16 +614,16 @@ class ServersTest(test.TestCase): self.assertEqual('server_test', server['name']) self.assertEqual(1, server['id']) self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_ref, server['imageRef']) + self.assertEqual(image_href, server['imageRef']) self.assertEqual(res.status_int, 200) def test_create_instance_v1_1_bad_href(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/asdf' + image_href = 'http://localhost/v1.1/images/asdf' flavor_ref = 'http://localhost/v1.1/flavors/3' body = dict(server=dict( - name='server_test', imageRef=image_ref, flavorRef=flavor_ref, + name='server_test', imageRef=image_href, flavorRef=flavor_ref, metadata={'hello': 'world', 'open': 'stack'}, personality={})) req = webob.Request.blank('/v1.1/servers') @@ -638,13 +636,12 @@ class ServersTest(test.TestCase): def test_create_instance_v1_1_local_href(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' - image_ref_local = '2' + image_id = 2 flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref_local, + 'imageRef': image_id, 'flavorRef': flavor_ref, }, } @@ 
-659,7 +656,7 @@ class ServersTest(test.TestCase): server = json.loads(res.body)['server'] self.assertEqual(1, server['id']) self.assertEqual(flavor_ref, server['flavorRef']) - self.assertEqual(image_ref, server['imageRef']) + self.assertEqual(image_id, server['imageRef']) self.assertEqual(res.status_int, 200) def test_create_instance_with_admin_pass_v1_0(self): @@ -686,12 +683,12 @@ class ServersTest(test.TestCase): def test_create_instance_with_admin_pass_v1_1(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'adminPass': 'testpass', }, @@ -708,12 +705,12 @@ class ServersTest(test.TestCase): def test_create_instance_with_empty_admin_pass_v1_1(self): self._setup_for_create_instance() - image_ref = 'http://localhost/v1.1/images/2' + image_href = 'http://localhost/v1.1/images/2' flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': image_ref, + 'imageRef': image_href, 'flavorRef': flavor_ref, 'adminPass': '', }, @@ -773,9 +770,7 @@ class ServersTest(test.TestCase): self.body = json.dumps(dict(server=inst_dict)) def server_update(context, id, params): - filtered_dict = dict( - display_name='server_test' - ) + filtered_dict = dict(display_name='server_test') self.assertEqual(params, filtered_dict) return filtered_dict @@ -844,7 +839,6 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.0/servers/detail') req.headers['Accept'] = 'application/xml' res = req.get_response(fakes.wsgi_app()) - print res.body dom = minidom.parseString(res.body) for i, server in enumerate(dom.getElementsByTagName('server')): self.assertEqual(server.getAttribute('id'), str(i)) @@ -865,7 +859,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageId'], '10') + self.assertEqual(s['imageId'], 10) self.assertEqual(s['flavorId'], 1) self.assertEqual(s['status'], 'BUILD') self.assertEqual(s['metadata']['seq'], str(i)) @@ -879,7 +873,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10') + self.assertEqual(s['imageRef'], 10) self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1') self.assertEqual(s['status'], 'BUILD') self.assertEqual(s['metadata']['seq'], str(i)) @@ -911,7 +905,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], host_ids[i % 2]) self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageId'], '10') + self.assertEqual(s['imageId'], 10) self.assertEqual(s['flavorId'], 1) def test_server_pause(self): @@ -1008,6 +1002,14 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 501) + def test_server_change_password_xml(self): + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/xml' + req.body = '<changePassword adminPass="1234pass">' +# res = req.get_response(fakes.wsgi_app()) +# self.assertEqual(res.status_int, 501) + def test_server_change_password_v1_1(self): mock_method = MockSetAdminPassword() self.stubs.Set(nova.compute.api.API, 
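The v1.1 create tests around here accept imageRef as either a full href ('http://localhost/v1.1/images/2') or a bare id, and return 400 for a non-numeric value such as 'asdf'. A hedged sketch of the kind of id extraction that behaviour implies (a hypothetical helper, not the controller's actual code path):

import urlparse  # Python 2, matching the codebase

def image_id_from_ref(image_ref):
    """Return the trailing numeric id of an image href, or the bare id itself."""
    path = urlparse.urlparse(str(image_ref)).path
    last = path.rstrip('/').split('/')[-1]
    if not last.isdigit():
        raise ValueError("Invalid imageRef: %s" % image_ref)
    return int(last)

assert image_id_from_ref('http://localhost/v1.1/images/2') == 2
assert image_id_from_ref(2) == 2
# image_id_from_ref('http://localhost/v1.1/images/asdf') raises ValueError,
# which the API layer would map to a 400 response.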
'set_admin_password', mock_method) @@ -1267,6 +1269,25 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) self.assertEqual(self.resize_called, True) + def test_resize_server_v11(self): + + req = webob.Request.blank('/v1.1/servers/1/action') + req.content_type = 'application/json' + req.method = 'POST' + body_dict = dict(resize=dict(flavorRef="http://localhost/3")) + req.body = json.dumps(body_dict) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + def test_resize_bad_flavor_fails(self): req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3))) @@ -1380,13 +1401,13 @@ class ServersTest(test.TestCase): class TestServerCreateRequestXMLDeserializer(unittest.TestCase): def setUp(self): - self.deserializer = servers.ServerCreateRequestXMLDeserializer() + self.deserializer = servers.ServerXMLDeserializer() def test_minimal_request(self): serial_request = """ <server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="new-server-test" imageId="1" flavorId="1"/>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1400,7 +1421,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> <metadata/> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1415,7 +1436,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> <personality/> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1431,7 +1452,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <metadata/> <personality/> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1448,7 +1469,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <personality/> <metadata/> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"server": { "name": "new-server-test", "imageId": "1", @@ -1466,7 +1487,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <file path="/etc/conf">aabbccdd</file> </personality> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1476,7 +1497,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): name="new-server-test" imageId="1" flavorId="1"> <personality><file path="/etc/conf">aabbccdd</file> <file path="/etc/sudoers">abcd</file></personality></server>""" - request = self.deserializer.deserialize(serial_request) + request = 
self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}, {"path": "/etc/sudoers", "contents": "abcd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1492,7 +1513,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <file path="/etc/ignoreme">anything</file> </personality> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1501,7 +1522,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="new-server-test" imageId="1" flavorId="1"> <personality><file>aabbccdd</file></personality></server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"contents": "aabbccdd"}] self.assertEquals(request["server"]["personality"], expected) @@ -1510,7 +1531,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="new-server-test" imageId="1" flavorId="1"> <personality><file path="/etc/conf"></file></personality></server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] self.assertEquals(request["server"]["personality"], expected) @@ -1519,7 +1540,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0" name="new-server-test" imageId="1" flavorId="1"> <personality><file path="/etc/conf"/></personality></server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] self.assertEquals(request["server"]["personality"], expected) @@ -1531,7 +1552,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <meta key="alpha">beta</meta> </metadata> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta"} self.assertEquals(request["server"]["metadata"], expected) @@ -1544,7 +1565,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <meta key="foo">bar</meta> </metadata> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta", "foo": "bar"} self.assertEquals(request["server"]["metadata"], expected) @@ -1556,7 +1577,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <meta key="alpha"></meta> </metadata> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": ""} self.assertEquals(request["server"]["metadata"], expected) @@ -1569,7 +1590,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <meta key="delta"/> </metadata> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "", "delta": ""} self.assertEquals(request["server"]["metadata"], expected) 
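The metadata deserialization cases above (empty values, a missing key attribute, duplicate keys where the last value wins) can be reproduced with a small minidom extractor; this is a sketch written against these tests, not the ServerXMLDeserializer implementation:

from xml.dom import minidom

def metadata_from_server_xml(xml_string):
    """Sketch: collect {key: value} pairs from <metadata><meta .../></metadata>."""
    doc = minidom.parseString(xml_string)
    metadata = {}
    for meta in doc.getElementsByTagName('meta'):
        key = meta.getAttribute('key')   # '' when the key attribute is absent
        value = ''.join(child.nodeValue for child in meta.childNodes
                        if child.nodeType == child.TEXT_NODE)
        metadata[key] = value            # later duplicates overwrite earlier ones
    return metadata

xml = ('<server><metadata>'
       '<meta key="alpha">beta</meta><meta key="delta"/>'
       '</metadata></server>')
assert metadata_from_server_xml(xml) == {'alpha': 'beta', 'delta': ''}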
@@ -1581,7 +1602,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <meta>beta</meta> </metadata> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "beta"} self.assertEquals(request["server"]["metadata"], expected) @@ -1594,7 +1615,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <meta>gamma</meta> </metadata> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "gamma"} self.assertEquals(request["server"]["metadata"], expected) @@ -1607,7 +1628,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <meta key="foo">baz</meta> </metadata> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') expected = {"foo": "baz"} self.assertEquals(request["server"]["metadata"], expected) @@ -1654,17 +1675,17 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", }, ], }} - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') self.assertEqual(request, expected) - def test_request_xmlser_with_flavor_image_ref(self): + def test_request_xmlser_with_flavor_image_href(self): serial_request = """ <server xmlns="http://docs.openstack.org/compute/api/v1.1" name="new-server-test" imageRef="http://localhost:8774/v1.1/images/1" flavorRef="http://localhost:8774/v1.1/flavors/1"> </server>""" - request = self.deserializer.deserialize(serial_request) + request = self.deserializer.deserialize(serial_request, 'create') self.assertEquals(request["server"]["flavorRef"], "http://localhost:8774/v1.1/flavors/1") self.assertEquals(request["server"]["imageRef"], @@ -1679,6 +1700,7 @@ class TestServerInstanceCreation(test.TestCase): fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} fakes.stub_out_auth(self.stubs) + fakes.stub_out_image_service(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) self.allow_admin = FLAGS.allow_admin_api @@ -1713,8 +1735,6 @@ class TestServerInstanceCreation(test.TestCase): self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api)) self.stubs.Set(nova.api.openstack.servers.Controller, '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) - self.stubs.Set(nova.api.openstack.common, - 'get_image_id_from_image_hash', make_stub_method(2)) return compute_api def _create_personality_request_dict(self, personality_files): diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py new file mode 100644 index 000000000..ebbdc9409 --- /dev/null +++ b/nova/tests/api/openstack/test_wsgi.py @@ -0,0 +1,293 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +import json +import webob + +from nova import exception +from nova import test +from nova.api.openstack import wsgi + + +class RequestTest(test.TestCase): + def test_content_type_missing(self): + request = wsgi.Request.blank('/tests/123') + request.body = "<body />" + self.assertRaises(exception.InvalidContentType, + request.get_content_type) + + def test_content_type_unsupported(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "text/html" + request.body = "asdf<br />" + self.assertRaises(exception.InvalidContentType, + request.get_content_type) + + def test_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + 
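The first RequestTest cases above expect get_content_type() to raise InvalidContentType when the header is missing or unsupported, and to strip a charset parameter. A sketch of that check (the exception class here is a local stand-in for nova.exception.InvalidContentType):

class InvalidContentType(Exception):
    """Stand-in for nova.exception.InvalidContentType in this sketch."""
    pass

def get_content_type(request):
    """Sketch: validate Content-Type, dropping any '; charset=...' parameter."""
    allowed = ('application/json', 'application/xml')
    if 'Content-Type' not in request.headers:
        raise InvalidContentType()
    content_type = request.headers['Content-Type'].split(';', 1)[0].strip()
    if content_type not in allowed:
        raise InvalidContentType()
    return content_type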
request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_from_accept_xml(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = "application/xml, application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123') + request.headers["Accept"] = \ + "application/json; q=0.3, application/xml; q=0.9" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_from_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + request = wsgi.Request.blank('/tests/123.json') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + request = wsgi.Request.blank('/tests/123.invalid') + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + def test_content_type_accept_and_query_extension(self): + request = wsgi.Request.blank('/tests/123.xml') + request.headers["Accept"] = "application/json" + result = request.best_match_content_type() + self.assertEqual(result, "application/xml") + + def test_content_type_accept_default(self): + request = wsgi.Request.blank('/tests/123.unsupported') + request.headers["Accept"] = "application/unsupported1" + result = request.best_match_content_type() + self.assertEqual(result, "application/json") + + +class DictSerializerTest(test.TestCase): + def test_dispatch(self): + serializer = wsgi.DictSerializer() + serializer.create = lambda x: 'pants' + serializer.default = lambda x: 'trousers' + self.assertEqual(serializer.serialize({}, 'create'), 'pants') + + def test_dispatch_default(self): + serializer = wsgi.DictSerializer() + serializer.create = lambda x: 'pants' + serializer.default = lambda x: 'trousers' + self.assertEqual(serializer.serialize({}, 'update'), 'trousers') + + +class XMLDictSerializerTest(test.TestCase): + def test_xml(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>' + serializer = wsgi.XMLDictSerializer(xmlns="asdf") + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_xml) + + +class JSONDictSerializerTest(test.TestCase): + def test_json(self): + input_dict = dict(servers=dict(a=(2, 3))) + expected_json = '{"servers":{"a":[2,3]}}' + serializer = wsgi.JSONDictSerializer() + result = serializer.serialize(input_dict) + result = result.replace('\n', '').replace(' ', '') + self.assertEqual(result, expected_json) + + +class TextDeserializerTest(test.TestCase): + def test_dispatch(self): + deserializer = wsgi.TextDeserializer() + deserializer.create = lambda x: 'pants' + deserializer.default = lambda x: 'trousers' + self.assertEqual(deserializer.deserialize({}, 'create'), 'pants') + + def test_dispatch_default(self): + deserializer = wsgi.TextDeserializer() + deserializer.create = 
lambda x: 'pants' + deserializer.default = lambda x: 'trousers' + self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers') + + +class JSONDeserializerTest(test.TestCase): + def test_json(self): + data = """{"a": { + "a1": "1", + "a2": "2", + "bs": ["1", "2", "3", {"c": {"c1": "1"}}], + "d": {"e": "1"}, + "f": "1"}}""" + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + deserializer = wsgi.JSONDeserializer() + self.assertEqual(deserializer.deserialize(data), as_dict) + + +class XMLDeserializerTest(test.TestCase): + def test_xml(self): + xml = """ + <a a1="1" a2="2"> + <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs> + <d><e>1</e></d> + <f>1</f> + </a> + """.strip() + as_dict = dict(a={ + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': dict(c1='1')}], + 'd': {'e': '1'}, + 'f': '1'}) + metadata = {'plurals': {'bs': 'b', 'ts': 't'}} + deserializer = wsgi.XMLDeserializer(metadata=metadata) + self.assertEqual(deserializer.deserialize(xml), as_dict) + + def test_xml_empty(self): + xml = """<a></a>""" + as_dict = {"a": {}} + deserializer = wsgi.XMLDeserializer() + self.assertEqual(deserializer.deserialize(xml), as_dict) + + +class ResponseSerializerTest(test.TestCase): + def setUp(self): + class JSONSerializer(object): + def serialize(self, data): + return 'pew_json' + + class XMLSerializer(object): + def serialize(self, data): + return 'pew_xml' + + self.serializers = { + 'application/json': JSONSerializer(), + 'application/XML': XMLSerializer(), + } + + self.serializer = wsgi.ResponseSerializer(serializers=self.serializers) + + def tearDown(self): + pass + + def test_get_serializer(self): + self.assertEqual(self.serializer.get_serializer('application/json'), + self.serializers['application/json']) + + def test_get_serializer_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.serializer.get_serializer, + 'application/unknown') + + def test_serialize_response(self): + response = self.serializer.serialize({}, 'application/json') + self.assertEqual(response.headers['Content-Type'], 'application/json') + self.assertEqual(response.body, 'pew_json') + + def test_serialize_response_dict_to_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.serializer.serialize, + {}, 'application/unknown') + + +class RequestDeserializerTest(test.TestCase): + def setUp(self): + class JSONDeserializer(object): + def deserialize(self, data): + return 'pew_json' + + class XMLDeserializer(object): + def deserialize(self, data): + return 'pew_xml' + + self.deserializers = { + 'application/json': JSONDeserializer(), + 'application/XML': XMLDeserializer(), + } + + self.deserializer = wsgi.RequestDeserializer( + deserializers=self.deserializers) + + def tearDown(self): + pass + + def test_get_deserializer(self): + expected = self.deserializer.get_deserializer('application/json') + self.assertEqual(expected, self.deserializers['application/json']) + + def test_get_deserializer_unknown_content_type(self): + self.assertRaises(exception.InvalidContentType, + self.deserializer.get_deserializer, + 'application/unknown') + + def test_get_expected_content_type(self): + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/json' + self.assertEqual(self.deserializer.get_expected_content_type(request), + 'application/json') + + def test_get_action_args(self): + env = { + 'wsgiorg.routing_args': [None, { + 'controller': None, + 'format': None, + 'action': 
'update', + 'id': 12, + }], + } + + expected = {'action': 'update', 'id': 12} + + self.assertEqual(self.deserializer.get_action_args(env), expected) + + def test_deserialize(self): + def fake_get_routing_args(request): + return {'action': 'create'} + self.deserializer.get_action_args = fake_get_routing_args + + request = wsgi.Request.blank('/') + request.headers['Accept'] = 'application/xml' + + deserialized = self.deserializer.deserialize(request) + expected = ('create', {}, 'application/xml') + + self.assertEqual(expected, deserialized) + + +class ResourceTest(test.TestCase): + def test_dispatch(self): + class Controller(object): + def index(self, req, pants=None): + return pants + + resource = wsgi.Resource(Controller()) + actual = resource.dispatch(None, 'index', {'pants': 'off'}) + expected = 'off' + self.assertEqual(actual, expected) + + def test_dispatch_unknown_controller_action(self): + class Controller(object): + def index(self, req, pants=None): + return pants + + resource = wsgi.Resource(Controller()) + self.assertRaises(AttributeError, resource.dispatch, + None, 'create', {}) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 5820ecdc2..d33268296 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -67,192 +67,3 @@ class Test(test.TestCase): self.assertEqual(result.body, "Router result") result = webob.Request.blank('/bad').get_response(Router()) self.assertNotEqual(result.body, "Router result") - - -class ControllerTest(test.TestCase): - - class TestRouter(wsgi.Router): - - class TestController(wsgi.Controller): - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "test": ["id"]}}} - - def show(self, req, id): # pylint: disable=W0622,C0103 - return {"test": {"id": id}} - - def __init__(self): - mapper = routes.Mapper() - mapper.resource("test", "tests", controller=self.TestController()) - wsgi.Router.__init__(self, mapper) - - def test_show(self): - request = wsgi.Request.blank('/tests/123') - result = request.get_response(self.TestRouter()) - self.assertEqual(json.loads(result.body), {"test": {"id": "123"}}) - - def test_response_content_type_from_accept_xml(self): - request = webob.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/xml") - - def test_response_content_type_from_accept_json(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/json") - - def test_response_content_type_from_query_extension_xml(self): - request = wsgi.Request.blank('/tests/123.xml') - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/xml") - - def test_response_content_type_from_query_extension_json(self): - request = wsgi.Request.blank('/tests/123.json') - result = request.get_response(self.TestRouter()) - self.assertEqual(result.headers["Content-Type"], "application/json") - - def test_response_content_type_default_when_unsupported(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.get_response(self.TestRouter()) - self.assertEqual(result.status_int, 200) - self.assertEqual(result.headers["Content-Type"], "application/json") - - -class RequestTest(test.TestCase): - - def 
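The ResourceTest cases above pin down the dispatch behaviour: a known action name is routed to the matching controller method with the deserialized arguments, and an unknown one surfaces as AttributeError. A bare-bones sketch of that idea (the real wsgi.Resource presumably also handles deserialization and serialization around this call):

class Resource(object):
    """Sketch: route an action name to a controller method."""

    def __init__(self, controller):
        self.controller = controller

    def dispatch(self, request, action, action_args):
        method = getattr(self.controller, action)  # AttributeError if unknown
        return method(request, **action_args)

# Resource(Controller()).dispatch(None, 'index', {'pants': 'off'}) -> 'off'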
test_request_content_type_missing(self): - request = wsgi.Request.blank('/tests/123') - request.body = "<body />" - self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) - - def test_request_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "text/html" - request.body = "asdf<br />" - self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) - - def test_request_content_type_with_charset(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Content-Type"] = "application/json; charset=UTF-8" - result = request.get_content_type() - self.assertEqual(result, "application/json") - - def test_content_type_from_accept_xml(self): - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = "application/xml, application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123') - request.headers["Accept"] = \ - "application/json; q=0.3, application/xml; q=0.9" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - def test_content_type_from_query_extension(self): - request = wsgi.Request.blank('/tests/123.xml') - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - request = wsgi.Request.blank('/tests/123.json') - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - request = wsgi.Request.blank('/tests/123.invalid') - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - def test_content_type_accept_and_query_extension(self): - request = wsgi.Request.blank('/tests/123.xml') - request.headers["Accept"] = "application/json" - result = request.best_match_content_type() - self.assertEqual(result, "application/xml") - - def test_content_type_accept_default(self): - request = wsgi.Request.blank('/tests/123.unsupported') - request.headers["Accept"] = "application/unsupported1" - result = request.best_match_content_type() - self.assertEqual(result, "application/json") - - -class SerializerTest(test.TestCase): - - def test_xml(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_xml = '<servers><a>(2,3)</a></servers>' - serializer = wsgi.Serializer() - result = serializer.serialize(input_dict, "application/xml") - result = result.replace('\n', '').replace(' ', '') - self.assertEqual(result, expected_xml) - - def test_json(self): - input_dict = dict(servers=dict(a=(2, 3))) - expected_json = '{"servers":{"a":[2,3]}}' - serializer = wsgi.Serializer() - result = serializer.serialize(input_dict, "application/json") - result = result.replace('\n', '').replace(' ', '') - self.assertEqual(result, expected_json) - - def test_unsupported_content_type(self): - serializer = wsgi.Serializer() - self.assertRaises(exception.InvalidContentType, serializer.serialize, - {}, "text/null") - - def test_deserialize_json(self): - data = """{"a": { - "a1": "1", - "a2": "2", - "bs": ["1", "2", "3", {"c": {"c1": "1"}}], - "d": {"e": "1"}, - "f": "1"}}""" - as_dict = dict(a={ - 'a1': '1', - 'a2': 
'2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) - metadata = {} - serializer = wsgi.Serializer(metadata) - self.assertEqual(serializer.deserialize(data, "application/json"), - as_dict) - - def test_deserialize_xml(self): - xml = """ - <a a1="1" a2="2"> - <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs> - <d><e>1</e></d> - <f>1</f> - </a> - """.strip() - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) - metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})} - serializer = wsgi.Serializer(metadata) - self.assertEqual(serializer.deserialize(xml, "application/xml"), - as_dict) - - def test_deserialize_empty_xml(self): - xml = """<a></a>""" - as_dict = {"a": {}} - serializer = wsgi.Serializer() - self.assertEqual(serializer.deserialize(xml, "application/xml"), - as_dict) diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 5872552ec..1e0b90d82 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -16,13 +16,14 @@ import StringIO -import glance.client +import nova.image -def stubout_glance_client(stubs, cls): - """Stubs out glance.client.Client""" - stubs.Set(glance.client, 'Client', - lambda *args, **kwargs: cls(*args, **kwargs)) +def stubout_glance_client(stubs): + def fake_get_glance_client(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (FakeGlance('foo'), image_id) + stubs.Set(nova.image, 'get_glance_client', fake_get_glance_client) class FakeGlance(object): diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 109905ded..6d108d494 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -34,7 +34,7 @@ class StubGlanceClient(object): def get_image_meta(self, image_id): return self.images[image_id] - def get_images_detailed(self): + def get_images_detailed(self, filters=None): return self.images.itervalues() def get_image(self, image_id): diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index 7e20c9b00..eb9a3056e 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -152,7 +152,10 @@ class TestOpenStackClient(object): def _decode_json(self, response): body = response.read() LOG.debug(_("Decoding JSON: %s") % (body)) - return json.loads(body) + if body: + return json.loads(body) + else: + return "" def api_get(self, relative_uri, **kwargs): kwargs.setdefault('check_response_status', [200]) @@ -166,7 +169,7 @@ class TestOpenStackClient(object): headers['Content-Type'] = 'application/json' kwargs['body'] = json.dumps(body) - kwargs.setdefault('check_response_status', [200]) + kwargs.setdefault('check_response_status', [200, 202]) response = self.api_request(relative_uri, **kwargs) return self._decode_json(response) @@ -185,6 +188,9 @@ class TestOpenStackClient(object): def post_server(self, server): return self.api_post('/servers', server)['server'] + def post_server_action(self, server_id, data): + return self.api_post('/servers/%s/action' % server_id, data) + def delete_server(self, server_id): return self.api_delete('/servers/%s' % server_id) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 7f590441e..522c7cb0e 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -27,6 +27,7 @@ from nova import flags from nova import service from nova import test # 
For the flags from nova.auth import manager +import nova.image.glance from nova.log import logging from nova.tests.integrated.api import client @@ -151,6 +152,11 @@ class _IntegratedTestBase(test.TestCase): f = self._get_flags() self.flags(**f) + def fake_get_image_service(image_href): + image_id = int(str(image_href).split('/')[-1]) + return (nova.image.fake.FakeImageService(), image_id) + self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service) + # set up services self.start_service('compute') self.start_service('volume') @@ -199,19 +205,13 @@ class _IntegratedTestBase(test.TestCase): LOG.debug("Image: %s" % image) if 'imageRef' in image: - image_ref = image['imageRef'] + image_href = image['imageRef'] else: - # NOTE(justinsb): The imageRef code hasn't yet landed - LOG.warning("imageRef not yet in images output") - image_ref = image['id'] - - # TODO(justinsb): This is FUBAR - image_ref = abs(hash(image_ref)) - - image_ref = 'http://fake.server/%s' % image_ref + image_href = image['id'] + image_href = 'http://fake.server/%s' % image_href # We now have a valid imageId - server['imageRef'] = image_ref + server['imageRef'] = image_href # Set a valid flavorId flavor = self.api.get_flavors()[0] diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index e89d0100a..fcb517cf5 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -179,6 +179,112 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Cleanup self._delete_server(created_server_id) + def test_create_and_rebuild_server(self): + """Rebuild a server.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah", + } + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual({}, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + + def test_create_and_rebuild_server_with_metadata(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah", + } + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, 
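The integrated rebuild tests above POST a 'rebuild' action through the client helper added in this patch; for reference, the payload shape they send looks like the sketch below (values copied from the tests; the metadata key is only present when the test replaces metadata):

rebuild_request = {
    'rebuild': {
        'imageRef': 'https://localhost/v1.1/32278/images/2',
        'name': 'blah',
        # optional: when present, the server's metadata is replaced wholesale
        'metadata': {'key_0': 'value_0'},
    },
}
# sent as: api.post_server_action(created_server_id, rebuild_request)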
found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + + def test_create_and_rebuild_server_with_metadata_removal(self): + """Rebuild a server with metadata.""" + + # create a server with initially has no metadata + server = self._build_minimal_create_server_request() + server_post = {'server': server} + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + server_post['server']['metadata'] = metadata + + created_server = self.api.post_server(server_post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # rebuild the server with metadata + post = {} + post['rebuild'] = { + "imageRef": "https://localhost/v1.1/32278/images/2", + "name": "blah", + } + + metadata = {} + post['rebuild']['metadata'] = metadata + + self.api.post_server_action(created_server_id, post) + LOG.debug("rebuilt server: %s" % created_server) + self.assertTrue(created_server['id']) + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + self.assertEqual('blah', found_server.get('name')) + + # Cleanup + self._delete_server(created_server_id) + if __name__ == "__main__": unittest.main() diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py index 8a9754777..fde32f797 100644 --- a/nova/tests/integrated/test_xml.py +++ b/nova/tests/integrated/test_xml.py @@ -32,7 +32,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase): """"Some basic XML sanity checks.""" def test_namespace_limits(self): - """/limits should have v1.0 namespace (hasn't changed in 1.1).""" + """/limits should have v1.1 namespace (has changed in 1.1).""" headers = {} headers['Accept'] = 'application/xml' @@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase): data = response.read() LOG.debug("data: %s" % data) - prefix = '<limits xmlns="%s"' % common.XML_NS_V10 + prefix = '<limits xmlns="%s"' % common.XML_NS_V11 self.assertTrue(data.startswith(prefix)) def test_namespace_servers(self): diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/nova/tests/scheduler/__init__.py diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py new file mode 100644 index 000000000..07817cc5a --- /dev/null +++ b/nova/tests/scheduler/test_host_filter.py @@ -0,0 +1,206 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filters. 
+""" + +import json + +from nova import exception +from nova import flags +from nova import test +from nova.scheduler import host_filter + +FLAGS = flags.FLAGS + + +class FakeZoneManager: + pass + + +class HostFilterTestCase(test.TestCase): + """Test case for host filters.""" + + def _host_caps(self, multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. + return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + def setUp(self): + self.old_flag = FLAGS.default_host_filter + FLAGS.default_host_filter = \ + 'nova.scheduler.host_filter.AllHostsFilter' + self.instance_type = dict(name='tiny', + memory_mb=50, + vcpus=10, + local_gb=500, + flavorid=1, + swap=500, + rxtx_quota=30000, + rxtx_cap=200) + + self.zone_manager = FakeZoneManager() + states = {} + for x in xrange(10): + states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} + self.zone_manager.service_states = states + + def tearDown(self): + FLAGS.default_host_filter = self.old_flag + + def test_choose_filter(self): + # Test default filter ... + hf = host_filter.choose_host_filter() + self.assertEquals(hf._full_name(), + 'nova.scheduler.host_filter.AllHostsFilter') + # Test valid filter ... + hf = host_filter.choose_host_filter( + 'nova.scheduler.host_filter.InstanceTypeFilter') + self.assertEquals(hf._full_name(), + 'nova.scheduler.host_filter.InstanceTypeFilter') + # Test invalid filter ... 
+ try: + host_filter.choose_host_filter('does not exist') + self.fail("Should not find host filter.") + except exception.SchedulerHostFilterNotFound: + pass + + def test_all_host_filter(self): + hf = host_filter.AllHostsFilter() + cooked = hf.instance_type_to_filter(self.instance_type) + hosts = hf.filter_hosts(self.zone_manager, cooked) + self.assertEquals(10, len(hosts)) + for host, capabilities in hosts: + self.assertTrue(host.startswith('host')) + + def test_instance_type_filter(self): + hf = host_filter.InstanceTypeFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = hf.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) + hosts = hf.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + def test_json_filter(self): + hf = host_filter.JsonFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = hf.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + hosts = hf.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + # Try some custom queries + + raw = ['or', + ['and', + ['<', '$compute.host_memory_free', 30], + ['<', '$compute.disk_available', 300] + ], + ['and', + ['>', '$compute.host_memory_free', 70], + ['>', '$compute.disk_available', 700] + ] + ] + cooked = json.dumps(raw) + hosts = hf.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['not', + ['=', '$compute.host_memory_free', 30], + ] + cooked = json.dumps(raw) + hosts = hf.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(9, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] + cooked = json.dumps(raw) + hosts = hf.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([2, 4, 6, 8, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + # Try some bogus input ... 
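The JsonFilter queries above are nested prefix expressions evaluated against each host's '$compute.*' capabilities. A toy evaluator for the operators these tests use ('=', '<', '>', 'in', 'and', 'or', 'not'), written only to illustrate the grammar and not taken from the scheduler code:

import json

def eval_query(query, caps):
    """Toy evaluator: query is a parsed JSON list, caps a capability dict."""
    def resolve(term):
        # Python 2 (matching the codebase): basestring covers str and unicode
        if isinstance(term, basestring) and term.startswith('$compute.'):
            return caps.get(term[len('$compute.'):])
        return term

    op = query[0]
    if op in ('and', 'or', 'not'):
        vals = [eval_query(arg, caps) if isinstance(arg, list) else arg
                for arg in query[1:]]
        if op == 'and':
            return all(vals)
        if op == 'or':
            return any(vals)
        return not vals[0]
    args = [resolve(arg) for arg in query[1:]]
    if op == '=':
        return args[0] == args[1]
    if op == '<':
        return args[0] < args[1]
    if op == '>':
        return args[0] > args[1]
    if op == 'in':
        return args[0] in args[1:]
    raise KeyError(op)  # unknown command, as the bogus-input cases expect

caps = {'host_memory_free': 30, 'disk_available': 300}
assert eval_query(json.loads('["in", "$compute.host_memory_free", 20, 30]'), caps)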
+ raw = ['unknown command', ] + cooked = json.dumps(raw) + try: + hf.filter_hosts(self.zone_manager, cooked) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False] + ))) + + try: + hf.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False + )) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$foo', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$.....', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) + + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', {}, ['>', '$missing....foo']]))) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py new file mode 100644 index 000000000..506fa62fb --- /dev/null +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -0,0 +1,144 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Tests For Least Cost Scheduler +""" + +from nova import flags +from nova import test +from nova.scheduler import least_cost +from nova.tests.scheduler import test_zone_aware_scheduler + +MB = 1024 * 1024 +FLAGS = flags.FLAGS + + +class FakeHost(object): + def __init__(self, host_id, free_ram, io): + self.id = host_id + self.free_ram = free_ram + self.io = io + + +class WeightedSumTestCase(test.TestCase): + def test_empty_domain(self): + domain = [] + weighted_fns = [] + result = least_cost.weighted_sum(domain, weighted_fns) + expected = [] + self.assertEqual(expected, result) + + def test_basic_costing(self): + hosts = [ + FakeHost(1, 512 * MB, 100), + FakeHost(2, 256 * MB, 400), + FakeHost(3, 512 * MB, 100) + ] + + weighted_fns = [ + (1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost* + (2, lambda h: h.io), # Avoid high I/O + ] + + costs = least_cost.weighted_sum( + domain=hosts, weighted_fns=weighted_fns) + + # Each 256 MB unit of free-ram contributes 0.5 points by way of: + # cost = weight * (score/max_score) = 1 * (256/512) = 0.5 + # Each 100 iops of IO adds 0.5 points by way of: + # cost = 2 * (100/400) = 2 * 0.25 = 0.5 + expected = [1.5, 2.5, 1.5] + self.assertEqual(expected, costs) + + +class LeastCostSchedulerTestCase(test.TestCase): + def setUp(self): + super(LeastCostSchedulerTestCase, self).setUp() + + class FakeZoneManager: + pass + + zone_manager = FakeZoneManager() + + states = test_zone_aware_scheduler.fake_zone_manager_service_states( + num_hosts=10) + zone_manager.service_states = states + + self.sched = least_cost.LeastCostScheduler() + self.sched.zone_manager = zone_manager + + def tearDown(self): + super(LeastCostSchedulerTestCase, self).tearDown() + + def assertWeights(self, expected, num, request_spec, hosts): + weighted = self.sched.weigh_hosts(num, request_spec, hosts) + self.assertDictListMatch(weighted, expected, approx_equal=True) + + def test_no_hosts(self): + num = 1 + request_spec = {} + hosts = [] + + expected = [] + self.assertWeights(expected, num, request_spec, hosts) + + def test_noop_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=1, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_cost_fn_weights(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.noop_cost_fn' + ] + FLAGS.noop_cost_fn_weight = 2 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [dict(weight=2, hostname=hostname) + for hostname, caps in hosts] + self.assertWeights(expected, num, request_spec, hosts) + + def test_fill_first_cost_fn(self): + FLAGS.least_cost_scheduler_cost_functions = [ + 'nova.scheduler.least_cost.fill_first_cost_fn' + ] + FLAGS.fill_first_cost_fn_weight = 1 + + num = 1 + request_spec = {} + hosts = self.sched.filter_hosts(num, request_spec) + + expected = [] + for idx, (hostname, caps) in enumerate(hosts): + # Costs are normalized so over 10 hosts, each host with increasing + # free ram will cost 1/N more. 
Since the lowest cost host has some + # free ram, we add in the 1/N for the base_cost + weight = 0.1 + (0.1 * idx) + weight_dict = dict(weight=weight, hostname=hostname) + expected.append(weight_dict) + + self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 54b3f80fb..50b6b52c6 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -61,7 +61,8 @@ class SchedulerTestCase(test.TestCase): """Test case for scheduler""" def setUp(self): super(SchedulerTestCase, self).setUp() - self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') + driver = 'nova.tests.scheduler.test_scheduler.TestDriver' + self.flags(scheduler_driver=driver) def _create_compute_service(self): """Create compute-manager(ComputeNode and Service record).""" @@ -196,7 +197,7 @@ class ZoneSchedulerTestCase(test.TestCase): service.topic = 'compute' service.id = kwargs['id'] service.availability_zone = kwargs['zone'] - service.created_at = datetime.datetime.utcnow() + service.created_at = utils.utcnow() return service def test_with_two_zones(self): @@ -290,7 +291,7 @@ class SimpleDriverTestCase(test.TestCase): dic['host'] = kwargs.get('host', 'dummy') s_ref = db.service_create(self.context, dic) if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys(): - t = datetime.datetime.utcnow() - datetime.timedelta(0) + t = utils.utcnow() - datetime.timedelta(0) dic['created_at'] = kwargs.get('created_at', t) dic['updated_at'] = kwargs.get('updated_at', t) db.service_update(self.context, s_ref['id'], dic) @@ -401,7 +402,7 @@ class SimpleDriverTestCase(test.TestCase): FLAGS.compute_manager) compute1.start() s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -542,7 +543,7 @@ class SimpleDriverTestCase(test.TestCase): def test_wont_sechedule_if_specified_host_is_down(self): compute1 = self.start_service('compute', host='host1') s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute') - now = datetime.datetime.utcnow() + now = utils.utcnow() delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2) past = now - delta db.service_update(self.context, s1['id'], {'updated_at': past}) @@ -692,7 +693,7 @@ class SimpleDriverTestCase(test.TestCase): dic = {'instance_id': instance_id, 'size': 1} v_ref = db.volume_create(self.context, {'instance_id': instance_id, 'size': 1}) - t1 = datetime.datetime.utcnow() - datetime.timedelta(1) + t1 = utils.utcnow() - datetime.timedelta(1) dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume', 'topic': 'volume', 'report_count': 0} s_ref = db.service_create(self.context, dic) @@ -709,7 +710,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -737,7 +738,7 @@ class SimpleDriverTestCase(test.TestCase): """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t = datetime.datetime.utcnow() - datetime.timedelta(10) + t = 
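To make the cost arithmetic in WeightedSumTestCase above concrete: each cost function's raw scores are divided by that function's maximum score, multiplied by the function's weight, and summed per host. A standalone sketch of that computation, using the normalization the test's comments describe (this is not the nova least_cost implementation):

def weighted_sum(domain, weighted_fns):
    """Sketch: per-item sum of weight * (score / max_score) for each cost fn."""
    totals = [0.0] * len(domain)
    for weight, fn in weighted_fns:
        scores = [fn(item) for item in domain]
        max_score = float(max(scores))
        for i, score in enumerate(scores):
            totals[i] += weight * (score / max_score)
    return totals

MB = 1024 * 1024
hosts = [(512 * MB, 100), (256 * MB, 400), (512 * MB, 100)]  # (free_ram, io)
fns = [(1, lambda h: h[0]),   # fill-first: free ram counts as a cost
       (2, lambda h: h[1])]   # avoid high I/O
assert weighted_sum(hosts, fns) == [1.5, 2.5, 1.5]  # matches the test's expectation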
utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) @@ -796,7 +797,7 @@ class SimpleDriverTestCase(test.TestCase): # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) - t1 = datetime.datetime.utcnow() - datetime.timedelta(10) + t1 = utils.utcnow() - datetime.timedelta(10) s_ref = self._create_compute_service(created_at=t1, updated_at=t1, host=dest) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index fdcde34c9..561fdea94 100644 --- a/nova/tests/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -22,6 +22,37 @@ from nova.scheduler import zone_aware_scheduler from nova.scheduler import zone_manager +def _host_caps(multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. + return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + +def fake_zone_manager_service_states(num_hosts): + states = {} + for x in xrange(num_hosts): + states['host%02d' % (x + 1)] = {'compute': _host_caps(x)} + return states + + class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): def filter_hosts(self, num, specs): # NOTE(sirp): this is returning [(hostname, services)] @@ -38,16 +69,16 @@ class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { - 'host1': { - 'compute': {'ram': 1000} - }, - 'host2': { - 'compute': {'ram': 2000} - }, - 'host3': { - 'compute': {'ram': 3000} - } - } + 'host1': { + 'compute': {'ram': 1000}, + }, + 'host2': { + 'compute': {'ram': 2000}, + }, + 'host3': { + 'compute': {'ram': 3000}, + }, + } class FakeEmptyZoneManager(zone_manager.ZoneManager): @@ -116,4 +147,6 @@ class ZoneAwareSchedulerTestCase(test.TestCase): sched.set_zone_manager(zm) fake_context = {} - self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {}) + self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, + fake_context, 1, + dict(host_filter=None, instance_type={})) diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index f02dd94b7..7d00bddfe 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -86,6 +86,7 @@ class _AuthManagerBaseTestCase(test.TestCase): super(_AuthManagerBaseTestCase, self).setUp() self.flags(connection_type='fake') self.manager = manager.AuthManager(new=True) + self.manager.mc.cache = {} def test_create_and_find_user(self): with user_generator(self.manager): diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 34a73ad1f..1bf1271c4 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -169,6 +169,25 @@ class CloudTestCase(test.TestCase): 
db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_create_volume_from_snapshot(self): + """Makes sure create_volume works when we specify a snapshot.""" + vol = db.volume_create(self.context, {'size': 1}) + snap = db.snapshot_create(self.context, {'volume_id': vol['id'], + 'volume_size': vol['size'], + 'status': "available"}) + snapshot_id = ec2utils.id_to_ec2_id(snap['id'], 'snap-%08x') + + result = self.cloud.create_volume(self.context, + snapshot_id=snapshot_id) + volume_id = result['volumeId'] + result = self.cloud.describe_volumes(self.context) + self.assertEqual(len(result['volumeSet']), 2) + self.assertEqual(result['volumeSet'][1]['volumeId'], volume_id) + + db.volume_destroy(self.context, ec2utils.ec2_id_to_id(volume_id)) + db.snapshot_destroy(self.context, snap['id']) + db.volume_destroy(self.context, vol['id']) + def test_describe_availability_zones(self): """Makes sure describe_availability_zones works and filters results.""" service1 = db.service_create(self.context, {'host': 'host1_zones', @@ -235,10 +254,10 @@ class CloudTestCase(test.TestCase): def test_describe_instances(self): """Makes sure describe_instances works and filters results.""" inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) inst2 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host2'}) comp1 = db.service_create(self.context, {'host': 'host1', 'availability_zone': 'zone1', @@ -428,7 +447,7 @@ class CloudTestCase(test.TestCase): def test_terminate_instances(self): inst1 = db.instance_create(self.context, {'reservation_id': 'a', - 'image_id': 1, + 'image_ref': 1, 'host': 'host1'}) terminate_instances = self.cloud.terminate_instances # valid instance_id diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..b4ac2dbc4 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -19,7 +19,6 @@ Tests For Compute """ -import datetime import mox import stubout @@ -84,7 +83,7 @@ class ComputeTestCase(test.TestCase): def _create_instance(self, params={}): """Create a test instance""" inst = {} - inst['image_id'] = 1 + inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' inst['launch_time'] = '10' inst['user_id'] = self.user.id @@ -150,7 +149,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_href=None, security_group=['testgroup']) try: self.assertEqual(len(db.security_group_get_by_instance( @@ -168,7 +167,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_href=None, security_group=['testgroup']) try: db.instance_destroy(self.context, ref[0]['id']) @@ -184,7 +183,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, instance_type=instance_types.get_default_instance_type(), - image_id=None, + image_href=None, security_group=['testgroup']) try: @@ -217,12 +216,12 @@ class ComputeTestCase(test.TestCase): instance_ref = db.instance_get(self.context, instance_id) self.assertEqual(instance_ref['launched_at'], None) self.assertEqual(instance_ref['deleted_at'], None) - launch = datetime.datetime.utcnow() + launch = utils.utcnow() self.compute.run_instance(self.context, instance_id) instance_ref = db.instance_get(self.context, instance_id) 
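test_create_volume_from_snapshot above round-trips database ids through EC2-style identifiers using templates like 'snap-%08x'. A sketch of that conversion pair (illustrative helpers, not ec2utils itself):

def id_to_ec2_id(obj_id, template='i-%08x'):
    """Format an integer id as an EC2-style id, e.g. 7 -> 'snap-00000007'."""
    return template % int(obj_id)

def ec2_id_to_id(ec2_id):
    """Parse the hex part back out, e.g. 'vol-0000000a' -> 10."""
    return int(ec2_id.split('-')[-1], 16)

assert id_to_ec2_id(7, 'snap-%08x') == 'snap-00000007'
assert ec2_id_to_id('vol-0000000a') == 10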
self.assert_(instance_ref['launched_at'] > launch) self.assertEqual(instance_ref['deleted_at'], None) - terminate = datetime.datetime.utcnow() + terminate = utils.utcnow() self.compute.terminate_instance(self.context, instance_id) self.context = self.context.elevated(True) instance_ref = db.instance_get(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 1a9a867ee..831e7670f 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -20,8 +20,6 @@ Tests For Console proxy. """ -import datetime - from nova import context from nova import db from nova import exception diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index c029d41e6..3361c7b73 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. """ -Tests For Scheduler Host Filter Drivers. +Tests For Scheduler Host Filters. """ import json @@ -31,7 +31,7 @@ class FakeZoneManager: class HostFilterTestCase(test.TestCase): - """Test case for host filter drivers.""" + """Test case for host filters.""" def _host_caps(self, multiplier): # Returns host capabilities in the following way: @@ -57,8 +57,8 @@ class HostFilterTestCase(test.TestCase): 'host_name-label': 'xs-%s' % multiplier} def setUp(self): - self.old_flag = FLAGS.default_host_filter_driver - FLAGS.default_host_filter_driver = \ + self.old_flag = FLAGS.default_host_filter + FLAGS.default_host_filter = \ 'nova.scheduler.host_filter.AllHostsFilter' self.instance_type = dict(name='tiny', memory_mb=50, @@ -76,51 +76,52 @@ class HostFilterTestCase(test.TestCase): self.zone_manager.service_states = states def tearDown(self): - FLAGS.default_host_filter_driver = self.old_flag + FLAGS.default_host_filter = self.old_flag - def test_choose_driver(self): - # Test default driver ... - driver = host_filter.choose_driver() - self.assertEquals(driver._full_name(), + def test_choose_filter(self): + # Test default filter ... + hf = host_filter.choose_host_filter() + self.assertEquals(hf._full_name(), 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid driver ... - driver = host_filter.choose_driver( - 'nova.scheduler.host_filter.FlavorFilter') - self.assertEquals(driver._full_name(), - 'nova.scheduler.host_filter.FlavorFilter') - # Test invalid driver ... + # Test valid filter ... + hf = host_filter.choose_host_filter( + 'nova.scheduler.host_filter.InstanceTypeFilter') + self.assertEquals(hf._full_name(), + 'nova.scheduler.host_filter.InstanceTypeFilter') + # Test invalid filter ... 
try: - host_filter.choose_driver('does not exist') - self.fail("Should not find driver") - except exception.SchedulerHostFilterDriverNotFound: + host_filter.choose_host_filter('does not exist') + self.fail("Should not find host filter.") + except exception.SchedulerHostFilterNotFound: pass - def test_all_host_driver(self): - driver = host_filter.AllHostsFilter() - cooked = driver.instance_type_to_filter(self.instance_type) - hosts = driver.filter_hosts(self.zone_manager, cooked) + def test_all_host_filter(self): + hf = host_filter.AllHostsFilter() + cooked = hf.instance_type_to_filter(self.instance_type) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(10, len(hosts)) for host, capabilities in hosts: self.assertTrue(host.startswith('host')) - def test_flavor_driver(self): - driver = host_filter.FlavorFilter() + def test_instance_type_filter(self): + hf = host_filter.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) + name, cooked = hf.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', + name) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() self.assertEquals('host05', just_hosts[0]) self.assertEquals('host10', just_hosts[5]) - def test_json_driver(self): - driver = host_filter.JsonFilter() + def test_json_filter(self): + hf = host_filter.JsonFilter() # filter all hosts that can support 50 ram and 500 disk - name, cooked = driver.instance_type_to_filter(self.instance_type) + name, cooked = hf.instance_type_to_filter(self.instance_type) self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() @@ -132,15 +133,16 @@ class HostFilterTestCase(test.TestCase): raw = ['or', ['and', ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300] + ['<', '$compute.disk_available', 300], ], ['and', ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700] - ] + ['>', '$compute.disk_available', 700], + ], ] + cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -152,7 +154,7 @@ class HostFilterTestCase(test.TestCase): ['=', '$compute.host_memory_free', 30], ] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(9, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -162,7 +164,7 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) - hosts = driver.filter_hosts(self.zone_manager, cooked) + hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -174,35 +176,30 @@ class HostFilterTestCase(test.TestCase): raw = ['unknown command', ] cooked = json.dumps(raw) try: - driver.filter_hosts(self.zone_manager, cooked) + hf.filter_hosts(self.zone_manager, 
cooked) self.fail("Should give KeyError") except KeyError, e: pass - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False] - ))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False]))) try: - driver.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False - )) + hf.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False)) self.fail("Should give KeyError") except KeyError, e: pass - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$foo', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', '$.....', 100] - ))) - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] - ))) - - self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( - ['=', {}, ['>', '$missing....foo']] - ))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$foo', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', '$.....', 100]))) + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) + + self.assertFalse(hf.filter_hosts(self.zone_manager, + json.dumps(['=', {}, ['>', '$missing....foo']]))) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index eb88f0d84..b6b36745a 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -18,6 +18,7 @@ import eventlet import mox import os import re +import shutil import sys from xml.etree.ElementTree import fromstring as xml_to_tree @@ -160,6 +161,7 @@ class LibvirtConnTestCase(test.TestCase): 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', + 'image_ref': '123456', 'instance_type_id': '5'} # m1.small def lazy_load_library_exists(self): @@ -280,6 +282,68 @@ class LibvirtConnTestCase(test.TestCase): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) + def test_snapshot(self): + FLAGS.image_service = 'nova.image.fake.FakeImageService' + + # Only file-based instance storages are supported at the moment + test_xml = """ + <domain type='kvm'> + <devices> + <disk type='file'> + <source file='filename'/> + </disk> + </devices> + </domain> + """ + + class FakeVirtDomain(object): + + def __init__(self): + pass + + def snapshotCreateXML(self, *args): + return None + + def XMLDesc(self, *args): + return test_xml + + def fake_lookup(instance_name): + if instance_name == instance_ref.name: + return FakeVirtDomain() + + def fake_execute(*args): + # Touch filename to pass 'with open(out_path)' + open(args[-1], "a").close() + + # Start test + image_service = utils.import_object(FLAGS.image_service) + + # Assuming that base image already exists in image_service + instance_ref = db.instance_create(self.context, self.test_instance) + properties = {'instance_id': instance_ref['id'], + 'user_id': str(self.context.user_id)} + snapshot_name = 'test-snap' + sent_meta = {'name': snapshot_name, 'is_public': False, + 'status': 'creating', 'properties': properties} + # Create new image. 
It will be updated in snapshot method + # To work with it from snapshot, the single image_service is needed + recv_meta = image_service.create(context, sent_meta) + + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = fake_lookup + self.mox.StubOutWithMock(connection.utils, 'execute') + connection.utils.execute = fake_execute + + self.mox.ReplayAll() + + conn = connection.LibvirtConnection(False) + conn.snapshot(instance_ref, recv_meta['id']) + + snapshot = image_service.show(context, recv_meta['id']) + self.assertEquals(snapshot['properties']['image_state'], 'available') + self.assertEquals(snapshot['status'], 'active') + self.assertEquals(snapshot['name'], snapshot_name) + def test_multi_nic(self): instance_data = dict(self.test_instance) network_info = _create_network_info(2) @@ -645,6 +709,8 @@ class LibvirtConnTestCase(test.TestCase): except Exception, e: count = (0 <= str(e.message).find('Unexpected method call')) + shutil.rmtree(os.path.join(FLAGS.instances_path, instance.name)) + self.assertTrue(count) def test_get_host_ip_addr(self): diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py index 6564a6955..40d117c45 100644 --- a/nova/tests/test_middleware.py +++ b/nova/tests/test_middleware.py @@ -16,7 +16,6 @@ # License for the specific language governing permissions and limitations # under the License. -import datetime import webob import webob.dec import webob.exc diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index cf8f4c05e..c5875a843 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -21,11 +21,24 @@ import select from eventlet import greenpool from eventlet import greenthread +from nova import exception from nova import test from nova import utils from nova.utils import parse_mailmap, str_dict_replace +class ExceptionTestCase(test.TestCase): + @staticmethod + def _raise_exc(exc): + raise exc() + + def test_exceptions_raise(self): + for name in dir(exception): + exc = getattr(exception, name) + if isinstance(exc, type): + self.assertRaises(exc, self._raise_exc, exc) + + class ProjectTestCase(test.TestCase): def test_authors_up_to_date(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py index b6b0fcc68..64b799a2c 100644 --- a/nova/tests/test_notifier.py +++ b/nova/tests/test_notifier.py @@ -13,10 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. 
-import nova +import stubout +import nova from nova import context from nova import flags +from nova import log from nova import rpc import nova.notifier.api from nova.notifier.api import notify @@ -24,8 +26,6 @@ from nova.notifier import no_op_notifier from nova.notifier import rabbit_notifier from nova import test -import stubout - class NotifierTestCase(test.TestCase): """Test case for notifications""" @@ -115,3 +115,22 @@ class NotifierTestCase(test.TestCase): notify('publisher_id', 'event_type', 'DEBUG', dict(a=3)) self.assertEqual(self.test_topic, 'testnotify.debug') + + def test_error_notification(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier') + self.stubs.Set(nova.flags.FLAGS, 'publish_errors', True) + LOG = log.getLogger('nova') + LOG.setup_from_flags() + msgs = [] + + def mock_cast(context, topic, data): + msgs.append(data) + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + LOG.error('foo') + self.assertEqual(1, len(msgs)) + msg = msgs[0] + self.assertEqual(msg['event_type'], 'error_notification') + self.assertEqual(msg['priority'], 'ERROR') + self.assertEqual(msg['payload']['error'], 'foo') diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index 916fca55e..0691231e4 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -223,7 +223,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id=1) + image_href=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -237,7 +237,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id=1) + image_href=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -250,6 +250,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: @@ -263,6 +264,7 @@ class QuotaTestCase(test.TestCase): volume.API().create, self.context, size=10, + snapshot_id=None, name='', description='') for volume_id in volume_ids: @@ -293,7 +295,7 @@ class QuotaTestCase(test.TestCase): min_count=1, max_count=1, instance_type=inst_type, - image_id='fake', + image_href='fake', metadata=metadata) def test_default_allowed_injected_files(self): @@ -339,16 +341,18 @@ class QuotaTestCase(test.TestCase): self.assertEqual(limit, 23456) def _create_with_injected_files(self, files): + FLAGS.image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, - instance_type=inst_type, image_id='fake', + instance_type=inst_type, image_href='3', injected_files=files) def test_no_injected_files(self): + FLAGS.image_service = 'nova.image.fake.FakeImageService' api = compute.API(image_service=self.StubImageService()) inst_type = instance_types.get_instance_type_by_name('m1.small') - api.create(self.context, instance_type=inst_type, image_id='fake') + api.create(self.context, instance_type=inst_type, image_href='3') def test_max_injected_files(self): files = [] diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index 22b66010a..e5ebd1600 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -55,8 +55,7 @@ class VMWareAPIVMTestCase(test.TestCase): vmwareapi_fake.reset()
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.set_stubs(self.stubs)
- glance_stubs.stubout_glance_client(self.stubs,
- glance_stubs.FakeGlance)
+ glance_stubs.stubout_glance_client(self.stubs)
self.conn = vmwareapi_conn.get_connection(False)
def _create_instance_in_the_db(self):
@@ -64,7 +63,7 @@ class VMWareAPIVMTestCase(test.TestCase):
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
- 'image_id': "1",
+ 'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'instance_type': 'm1.large',
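
The fixture above swaps the old image_id key for image_ref and stubs the glance client through a single-argument helper. Below is a small, self-contained sketch of the same fixture shape in isolation; FakeImageLookup and build_test_instance are invented names for illustration, and only the image_ref key and its values are taken from the change shown here.

# Illustrative sketch only: a fake image lookup keyed by image_ref, plus the
# instance fixture shape used above. Class and function names are invented.
class FakeImageLookup(object):
    def __init__(self):
        self._images = {'1': {'id': '1', 'disk_format': 'vmdk'}}

    def show(self, context, image_id):
        # An image_ref stored on the instance is resolved to a plain id
        # before it reaches the image service.
        return self._images[str(image_id)]


def build_test_instance(project_id, user_id):
    # The fixture now carries image_ref (an id or a full href), not image_id.
    return {'id': 1,
            'project_id': project_id,
            'user_id': user_id,
            'image_ref': '1',
            'kernel_id': '1',
            'ramdisk_id': '1',
            'instance_type': 'm1.large'}


instance = build_test_instance('fake-project', 'fake-user')
assert FakeImageLookup().show(None, instance['image_ref'])['id'] == '1'
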
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 3472b1f59..4f10ee6af 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -45,10 +45,11 @@ class VolumeTestCase(test.TestCase): self.context = context.get_admin_context() @staticmethod - def _create_volume(size='0'): + def _create_volume(size='0', snapshot_id=None): """Create a volume object.""" vol = {} vol['size'] = size + vol['snapshot_id'] = snapshot_id vol['user_id'] = 'fake' vol['project_id'] = 'fake' vol['availability_zone'] = FLAGS.storage_availability_zone @@ -69,6 +70,25 @@ class VolumeTestCase(test.TestCase): self.context, volume_id) + def test_create_volume_from_snapshot(self): + """Test volume can be created from a snapshot.""" + volume_src_id = self._create_volume() + self.volume.create_volume(self.context, volume_src_id) + snapshot_id = self._create_snapshot(volume_src_id) + self.volume.create_snapshot(self.context, volume_src_id, snapshot_id) + volume_dst_id = self._create_volume(0, snapshot_id) + self.volume.create_volume(self.context, volume_dst_id, snapshot_id) + self.assertEqual(volume_dst_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).id) + self.assertEqual(snapshot_id, db.volume_get( + context.get_admin_context(), + volume_dst_id).snapshot_id) + + self.volume.delete_volume(self.context, volume_dst_id) + self.volume.delete_snapshot(self.context, snapshot_id) + self.volume.delete_volume(self.context, volume_src_id) + def test_too_big_volume(self): """Ensure failure if a too large of a volume is requested.""" # FIXME(vish): validation needs to move into the data layer in diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 9d56c1644..3a175b106 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -79,7 +79,7 @@ class XenAPIVolumeTestCase(test.TestCase): self.values = {'id': 1, 'project_id': 'fake', 'user_id': 'fake', - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -193,8 +193,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_is_vdi_pv(self.stubs) self.stubs.Set(VMOps, 'reset_network', reset_network) stubs.stub_out_vm_methods(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) fake_utils.stub_out_utils_execute(self.stubs) self.context = context.RequestContext('fake', 'fake', False) self.conn = xenapi_conn.get_connection(False) @@ -207,7 +206,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': id, 'project_id': proj, 'user_id': user, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ -351,14 +350,14 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(self.vm['HVM_boot_params'], {}) self.assertEquals(self.vm['HVM_boot_policy'], '') - def _test_spawn(self, image_id, kernel_id, ramdisk_id, + def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", instance_id=1, check_injection=False): stubs.stubout_loopingcall_start(self.stubs) values = {'id': instance_id, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': image_id, + 'image_ref': image_ref, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, 'instance_type_id': instance_type_id, @@ -567,7 +566,7 @@ class XenAPIVMTestCase(test.TestCase): 'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large @@ 
-641,7 +640,7 @@ class XenAPIMigrateInstance(test.TestCase): self.values = {'id': 1, 'project_id': self.project.id, 'user_id': self.user.id, - 'image_id': 1, + 'image_ref': 1, 'kernel_id': None, 'ramdisk_id': None, 'local_gb': 5, @@ -652,8 +651,7 @@ class XenAPIMigrateInstance(test.TestCase): fake_utils.stub_out_utils_execute(self.stubs) stubs.stub_out_migration_methods(self.stubs) stubs.stubout_get_this_vm_uuid(self.stubs) - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) def tearDown(self): super(XenAPIMigrateInstance, self).tearDown() @@ -679,8 +677,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): """Unit tests for code that detects the ImageType.""" def setUp(self): super(XenAPIDetermineDiskImageTestCase, self).setUp() - glance_stubs.stubout_glance_client(self.stubs, - glance_stubs.FakeGlance) + glance_stubs.stubout_glance_client(self.stubs) class FakeInstance(object): pass @@ -697,7 +694,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): def test_instance_disk(self): """If a kernel is specified, the image type is DISK (aka machine).""" FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL self.assert_disk_type(vm_utils.ImageType.DISK) @@ -707,7 +704,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): DISK_RAW is assumed. """ FLAGS.xenapi_image_service = 'objectstore' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -717,7 +714,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'raw'. """ FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_RAW) @@ -727,7 +724,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): this case will be 'vhd'. """ FLAGS.xenapi_image_service = 'glance' - self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD + self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_VHD) diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index 0addd5573..764de42d8 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -61,7 +61,7 @@ def stub_out_db_instance_api(stubs): 'name': values['name'],
'id': values['id'],
'reservation_id': utils.generate_uid('r'),
- 'image_id': values['image_id'],
+ 'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'state_description': 'scheduling',
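
The hunk above threads image_ref through the fake DB layer used by the vmwareapi tests. A condensed sketch of that stub pattern follows, assuming the stubout-style `stubs` object the surrounding tests already pass in; the helper name and the trimmed record are illustrative, while the keys mirror the change.

# Condensed sketch of the stub above: replace db.instance_create with a fake
# that echoes back a record keyed by image_ref. The helper name and the
# reduced set of fields are illustrative.
from nova import db
from nova import utils


def stub_out_minimal_instance_api(stubs):
    def fake_instance_create(context, values):
        return {'name': values['name'],
                'id': values['id'],
                'reservation_id': utils.generate_uid('r'),
                'image_ref': values['image_ref'],
                'kernel_id': values['kernel_id'],
                'ramdisk_id': values['ramdisk_id'],
                'state_description': 'scheduling'}

    stubs.Set(db, 'instance_create', fake_instance_create)

A test would wire this up from setUp the same way as above, e.g. stub_out_minimal_instance_api(self.stubs).
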
diff --git a/nova/utils.py b/nova/utils.py index 361fc9873..b1638e72c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -307,7 +307,7 @@ def get_my_linklocal(interface): def utcnow(): - """Overridable version of datetime.datetime.utcnow.""" + """Overridable version of utils.utcnow.""" if utcnow.override_time: return utcnow.override_time return datetime.datetime.utcnow() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 5ac376e46..0225797d7 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -82,6 +82,21 @@ class FakeConnection(driver.ComputeDriver): def __init__(self): self.instances = {} + self.host_status = { + 'host_name-description': 'Fake Host', + 'host_hostname': 'fake-mini', + 'host_memory_total': 8000000000, + 'host_memory_overhead': 10000000, + 'host_memory_free': 7900000000, + 'host_memory_free_computed': 7900000000, + 'host_other_config': {}, + 'host_ip_address': '192.168.1.109', + 'host_cpu_info': {}, + 'disk_available': 500000000000, + 'disk_total': 600000000000, + 'disk_used': 100000000000, + 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', + 'host_name_label': 'fake-mini'} @classmethod def instance(cls): @@ -456,3 +471,11 @@ class FakeConnection(driver.ComputeDriver): def test_remove_vm(self, instance_name): """ Removes the named VM, as if it crashed. For testing""" self.instances.pop(instance_name) + + def update_host_status(self): + """Return fake Host Status of ram, disk, network.""" + return self.host_status + + def get_host_stats(self, refresh=False): + """Return fake Host Status of ram, disk, network.""" + return self.host_status diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 1142e97a4..05b4775c1 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -151,7 +151,7 @@ class HyperVConnection(driver.ComputeDriver): base_vhd_filename = os.path.join(FLAGS.instances_path, instance.name) vhdfile = "%s.vhd" % (base_vhd_filename) - images.fetch(instance['image_id'], vhdfile, user, project) + images.fetch(instance['image_ref'], vhdfile, user, project) try: self._create_vm(instance) diff --git a/nova/virt/images.py b/nova/virt/images.py index 02c898fda..de7ac61df 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. from nova import context from nova import flags +import nova.image from nova import log as logging from nova import utils @@ -31,12 +32,12 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.virt.images') -def fetch(image_id, path, _user, _project): +def fetch(image_href, path, _user, _project): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. 
- image_service = utils.import_object(FLAGS.image_service) + (image_service, image_id) = nova.image.get_image_service(image_href) with open(path, "wb") as image_file: elevated = context.get_admin_context() metadata = image_service.get(elevated, image_id, image_file) diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index de2497a76..20986d4d5 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -116,7 +116,7 @@ </serial> #if $getVar('vncserver_host', False) - <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='${vncserver_host}'/> + <graphics type='vnc' port='-1' autoport='yes' keymap='${vnc_keymap}' listen='${vncserver_host}'/> #end if </devices> </domain> diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 94a703954..c491418ae 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -36,6 +36,7 @@ Supports KVM, LXC, QEMU, UML, and XEN. """ +import hashlib import multiprocessing import os import random @@ -57,6 +58,7 @@ from nova import context from nova import db from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import utils from nova import vnc @@ -378,7 +380,7 @@ class LibvirtConnection(driver.ComputeDriver): virt_dom.detachDevice(xml) @exception.wrap_exception - def snapshot(self, instance, image_id): + def snapshot(self, instance, image_href): """Create snapshot from a running VM instance. This command only works with qemu 0.14+, the qemu_img flag is @@ -386,17 +388,23 @@ class LibvirtConnection(driver.ComputeDriver): to support this command. """ - image_service = utils.import_object(FLAGS.image_service) virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() - base = image_service.show(elevated, instance['image_id']) + (image_service, image_id) = nova.image.get_image_service( + instance['image_ref']) + base = image_service.show(elevated, image_id) + (snapshot_image_service, snapshot_image_id) = \ + nova.image.get_image_service(image_href) + snapshot = snapshot_image_service.show(elevated, snapshot_image_id) metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], 'is_public': False, - 'name': '%s.%s' % (base['name'], image_id), - 'properties': {'architecture': base['architecture'], + 'status': 'active', + 'name': snapshot['name'], + 'properties': {'architecture': + base['properties']['architecture'], 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', @@ -438,7 +446,7 @@ class LibvirtConnection(driver.ComputeDriver): # Upload that image to the image service with open(out_path) as image_file: image_service.update(elevated, - image_id, + image_href, metadata, image_file) @@ -488,19 +496,27 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def pause(self, instance, callback): - raise exception.ApiError("pause not supported for libvirt.") + """Pause VM instance""" + dom = self._lookup_by_name(instance.name) + dom.suspend() @exception.wrap_exception def unpause(self, instance, callback): - raise exception.ApiError("unpause not supported for libvirt.") + """Unpause paused VM instance""" + dom = self._lookup_by_name(instance.name) + dom.resume() @exception.wrap_exception def suspend(self, instance, callback): - raise exception.ApiError("suspend not supported for libvirt") + """Suspend the specified instance""" + dom = self._lookup_by_name(instance.name) + 
dom.managedSave(0) @exception.wrap_exception def resume(self, instance, callback): - raise exception.ApiError("resume not supported for libvirt") + """resume the specified instance""" + dom = self._lookup_by_name(instance.name) + dom.create() @exception.wrap_exception def rescue(self, instance): @@ -776,7 +792,7 @@ class LibvirtConnection(driver.ComputeDriver): project = manager.AuthManager().get_project(inst['project_id']) if not disk_images: - disk_images = {'image_id': inst['image_id'], + disk_images = {'image_id': inst['image_ref'], 'kernel_id': inst['kernel_id'], 'ramdisk_id': inst['ramdisk_id']} @@ -797,7 +813,7 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project) - root_fname = '%08x' % int(disk_images['image_id']) + root_fname = hashlib.sha1(disk_images['image_id']).hexdigest() size = FLAGS.minimum_root_size inst_type_id = inst['instance_type_id'] @@ -872,7 +888,7 @@ class LibvirtConnection(driver.ComputeDriver): if key or net: inst_name = inst['name'] - img_id = inst.image_id + img_id = inst.image_ref if key: LOG.info(_('instance %(inst_name)s: injecting key into' ' image %(img_id)s') % locals()) @@ -962,6 +978,7 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.vnc_enabled: if FLAGS.libvirt_type != 'lxc': xml_info['vncserver_host'] = FLAGS.vncserver_host + xml_info['vnc_keymap'] = FLAGS.vnc_keymap if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 89c0b9769..84153fa1e 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -195,7 +195,7 @@ class NWFilterFirewall(FirewallDriver): logging.info('ensuring static filters') self._ensure_static_filters() - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): base_filter = 'nova-vpn' else: base_filter = 'nova-base' @@ -357,7 +357,7 @@ class NWFilterFirewall(FirewallDriver): def _create_network_filters(self, instance, network_info, instance_secgroup_filter_name): - if instance['image_id'] == str(FLAGS.vpn_image_id): + if instance['image_ref'] == str(FLAGS.vpn_image_id): base_filter = 'nova-vpn' else: base_filter = 'nova-base' diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 6d7149841..5f76b0df5 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -150,7 +150,7 @@ class VMWareVMOps(object): """
image_size, image_properties = \
vmware_images.get_vmdk_size_and_properties(
- instance.image_id, instance)
+ instance.image_ref, instance)
vmdk_file_size_in_kb = int(image_size) / 1024
os_type = image_properties.get("vmware_ostype", "otherGuest")
adapter_type = image_properties.get("vmware_adaptertype",
@@ -265,23 +265,23 @@ class VMWareVMOps(object): def _fetch_image_on_esx_datastore():
"""Fetch image from Glance to ESX datastore."""
- LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
+ LOG.debug(_("Downloading image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- ({'image_id': instance.image_id,
+ ({'image_ref': instance.image_ref,
'data_store_name': data_store_name}))
# Upload the -flat.vmdk file whose meta-data file we just created
# above
vmware_images.fetch_image(
- instance.image_id,
+ instance.image_ref,
instance,
host=self._session._host_ip,
data_center_name=self._get_datacenter_name_and_ref()[1],
datastore_name=data_store_name,
cookies=cookies,
file_path=flat_uploaded_vmdk_name)
- LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
+ LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- ({'image_id': instance.image_id,
+ ({'image_ref': instance.image_ref,
'data_store_name': data_store_name}))
_fetch_image_on_esx_datastore()
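
The two debug messages above use the project's translatable-logging idiom: a gettext-wrapped format string with named placeholders, interpolated from a dict. A stand-alone sketch of just that idiom is below; the logger name and the sample values are illustrative.

# Stand-alone sketch of the logging idiom used above; logger name and values
# are illustrative.
import gettext
import logging

_ = gettext.gettext
LOG = logging.getLogger('nova.virt.vmwareapi.vmops')


def log_fetch(image_ref, data_store_name):
    LOG.debug(_("Downloading image file data %(image_ref)s to the ESX "
                "data store %(data_store_name)s") %
              {'image_ref': image_ref,
               'data_store_name': data_store_name})


logging.basicConfig(level=logging.DEBUG)
log_fetch('1', 'datastore1')
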
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 50c6baedf..48edc5384 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -18,10 +18,9 @@ Utility functions for Image transfer.
"""
-from glance import client
-
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
@@ -117,8 +116,8 @@ def upload_image(image, instance, **kwargs):
def _get_glance_image(image, instance, **kwargs):
"""Download image from the glance image server."""
LOG.debug(_("Downloading image %s from glance image server") % image)
- glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
- metadata, read_iter = glance_client.get_image(image)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
+ metadata, read_iter = glance_client.get_image(image_id)
read_file_handle = read_write_util.GlanceFileRead(read_iter)
file_size = int(metadata['size'])
write_file_handle = read_write_util.VMWareHTTPWriteFile(
@@ -153,7 +152,7 @@ def _put_glance_image(image, instance, **kwargs):
kwargs.get("cookies"),
kwargs.get("file_path"))
file_size = read_file_handle.get_size()
- glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
# The properties and other fields that we need to set for the image.
image_metadata = {"is_public": True,
"disk_format": "vmdk",
@@ -165,7 +164,7 @@ def _put_glance_image(image, instance, **kwargs):
"vmware_image_version":
kwargs.get("image_version")}}
start_transfer(read_file_handle, file_size, glance_client=glance_client,
- image_id=image, image_meta=image_metadata)
+ image_id=image_id, image_meta=image_metadata)
LOG.debug(_("Uploaded image %s to the Glance image server") % image)
@@ -188,9 +187,8 @@ def get_vmdk_size_and_properties(image, instance):
LOG.debug(_("Getting image size for the image %s") % image)
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
- glance_client = client.Client(FLAGS.glance_host,
- FLAGS.glance_port)
- meta_data = glance_client.get_image_meta(image)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
+ meta_data = glance_client.get_image_meta(image_id)
size, properties = meta_data["size"], meta_data["properties"]
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
raise NotImplementedError
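
Throughout this file the direct glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) construction is replaced by nova.image.get_glance_client(image), which hands back a (client, image_id) pair. That helper's implementation is not part of this diff; the sketch below only illustrates the idea, under the assumption that an image reference may be either a bare id or a full glance href.

# Not the real nova.image.get_glance_client -- that module is outside this
# diff. A sketch of the indirection it provides, assuming an image reference
# is either a bare id or an href like http://host:9292/images/42.
import urlparse  # Python 2, matching the rest of the codebase

import glance.client
from nova import flags

FLAGS = flags.FLAGS


def sketch_get_glance_client(image_href):
    """Return (glance_client, image_id) for a bare id or a glance href."""
    if str(image_href).isdigit():
        # Bare id: fall back to the configured glance host and port.
        client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
        return client, int(image_href)
    parsed = urlparse.urlparse(str(image_href))
    image_id = parsed.path.rstrip('/').rsplit('/', 1)[-1]
    client = glance.client.Client(parsed.hostname, parsed.port)
    return client, image_id

Callers then use the returned pair exactly as the new lines above do, e.g. metadata, read_iter = glance_client.get_image(image_id).
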
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 76988b172..165888cb2 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -51,13 +51,13 @@ A fake XenAPI SDK. """ -import datetime import uuid from pprint import pformat from nova import exception from nova import log as logging +from nova import utils _CLASSES = ['host', 'network', 'session', 'SR', 'VBD', @@ -540,7 +540,7 @@ class SessionBase(object): except Failure, exc: task['error_info'] = exc.details task['status'] = 'failed' - task['finished'] = datetime.datetime.now() + task['finished'] = utils.utcnow() return task_ref def _check_session(self, params): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 06ee8ee9b..98668e6ae 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -32,6 +32,7 @@ from xml.dom import minidom import glance.client from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import utils from nova.auth.manager import AuthManager @@ -455,8 +456,8 @@ class VMHelper(HelperBase): # DISK restores sr_ref = safe_find_sr(session) - client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta, image_file = client.get_image(image) + glance_client, image_id = nova.image.get_glance_client(image) + meta, image_file = glance_client.get_image(image_id) virtual_size = int(meta['size']) vdi_size = virtual_size LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals()) @@ -515,10 +516,10 @@ class VMHelper(HelperBase): ImageType.DISK_RAW: 'DISK_RAW', ImageType.DISK_VHD: 'DISK_VHD'} disk_format = pretty_format[image_type] - image_id = instance.image_id + image_ref = instance.image_ref instance_id = instance.id LOG.debug(_("Detected %(disk_format)s format for image " - "%(image_id)s, instance %(instance_id)s") % locals()) + "%(image_ref)s, instance %(instance_id)s") % locals()) def determine_from_glance(): glance_disk_format2nova_type = { @@ -527,8 +528,9 @@ class VMHelper(HelperBase): 'ari': ImageType.KERNEL_RAMDISK, 'raw': ImageType.DISK_RAW, 'vhd': ImageType.DISK_VHD} - client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - meta = client.get_image_meta(instance.image_id) + image_ref = instance.image_ref + glance_client, image_id = nova.image.get_glance_client(image_ref) + meta = glance_client.get_image_meta(image_id) disk_format = meta['disk_format'] try: return glance_disk_format2nova_type[disk_format] @@ -1044,6 +1046,8 @@ def _stream_disk(dev, image_type, virtual_size, image_file): offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) + utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev) + with open('/dev/%s' % dev, 'wb') as f: f.seek(offset) for chunk in image_file: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 2b3fb6a39..32dae97c2 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -111,7 +111,7 @@ class VMOps(object): project = AuthManager().get_project(instance.project_id) disk_image_type = VMHelper.determine_disk_image_type(instance) vdis = VMHelper.fetch_image(self._session, - instance.id, instance.image_id, user, project, + instance.id, instance.image_ref, user, project, disk_image_type) return vdis diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py index b5b00e44e..859bfd65f 100644 --- a/nova/vnc/__init__.py +++ b/nova/vnc/__init__.py @@ -32,3 +32,5 @@ flags.DEFINE_string('vncserver_host', '0.0.0.0', 'the host interface on which vnc server should listen') 
flags.DEFINE_bool('vnc_enabled', True, 'enable vnc related features') +flags.DEFINE_string('vnc_keymap', 'en-us', + 'keymap for vnc') diff --git a/nova/volume/api.py b/nova/volume/api.py index c1af30de0..b07f2e94b 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -20,14 +20,13 @@ Handles all requests relating to volumes. """ -import datetime -from nova import db from nova import exception from nova import flags from nova import log as logging from nova import quota from nova import rpc +from nova import utils from nova.db import base FLAGS = flags.FLAGS @@ -39,7 +38,14 @@ LOG = logging.getLogger('nova.volume') class API(base.Base): """API for interacting with the volume manager.""" - def create(self, context, size, name, description): + def create(self, context, size, snapshot_id, name, description): + if snapshot_id != None: + snapshot = self.get_snapshot(context, snapshot_id) + if snapshot['status'] != "available": + raise exception.ApiError( + _("Snapshot status must be available")) + size = snapshot['volume_size'] + if quota.allowed_volumes(context, 1, size) < 1: pid = context.project_id LOG.warn(_("Quota exceeeded for %(pid)s, tried to create" @@ -51,6 +57,7 @@ class API(base.Base): 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, + 'snapshot_id': snapshot_id, 'availability_zone': FLAGS.storage_availability_zone, 'status': "creating", 'attach_status': "detached", @@ -62,14 +69,15 @@ class API(base.Base): FLAGS.scheduler_topic, {"method": "create_volume", "args": {"topic": FLAGS.volume_topic, - "volume_id": volume['id']}}) + "volume_id": volume['id'], + "snapshot_id": snapshot_id}}) return volume def delete(self, context, volume_id): volume = self.get(context, volume_id) if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) host = volume['host'] diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 21cc228c9..87e13277f 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -133,6 +133,12 @@ class VolumeDriver(object): changes to the volume object to be persisted.""" self._create_volume(volume['name'], self._sizestr(volume['size'])) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a volume from a snapshot.""" + self._create_volume(volume['name'], self._sizestr(volume['size'])) + self._copy_volume(self.local_path(snapshot), self.local_path(volume), + snapshot['volume_size']) + def delete_volume(self, volume): """Deletes a logical volume.""" if self._volume_not_present(volume['name']): @@ -665,6 +671,13 @@ class SheepdogDriver(VolumeDriver): "sheepdog:%s" % volume['name'], self._sizestr(volume['size'])) + def create_volume_from_snapshot(self, volume, snapshot): + """Creates a sheepdog volume from a snapshot.""" + self._try_execute('qemu-img', 'create', '-b', + "sheepdog:%s:%s" % (snapshot['volume_name'], + snapshot['name']), + "sheepdog:%s" % volume['name']) + def delete_volume(self, volume): """Deletes a logical volume""" self._try_execute('collie', 'vdi', 'delete', volume['name']) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 40a104d35..798bd379a 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,8 +42,6 @@ intact. 
""" -import datetime - from nova import context from nova import exception @@ -90,7 +88,7 @@ class VolumeManager(manager.SchedulerDependentManager): else: LOG.info(_("volume %s: skipping export"), volume['name']) - def create_volume(self, context, volume_id): + def create_volume(self, context, volume_id, snapshot_id=None): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) @@ -108,7 +106,13 @@ class VolumeManager(manager.SchedulerDependentManager): vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) - model_update = self.driver.create_volume(volume_ref) + if snapshot_id == None: + model_update = self.driver.create_volume(volume_ref) + else: + snapshot_ref = self.db.snapshot_get(context, snapshot_id) + model_update = self.driver.create_volume_from_snapshot( + volume_ref, + snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) @@ -121,7 +125,7 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'error'}) raise - now = datetime.datetime.utcnow() + now = utils.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) diff --git a/nova/wsgi.py b/nova/wsgi.py index ea9bb963d..33ba852bc 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -85,36 +85,7 @@ class Server(object): class Request(webob.Request): - - def best_match_content_type(self): - """Determine the most acceptable content-type. - - Based on the query extension then the Accept header. - - """ - parts = self.path.rsplit('.', 1) - - if len(parts) > 1: - format = parts[1] - if format in ['json', 'xml']: - return 'application/{0}'.format(parts[1]) - - ctypes = ['application/json', 'application/xml'] - bm = self.accept.best_match(ctypes) - - return bm or 'application/json' - - def get_content_type(self): - allowed_types = ("application/xml", "application/json") - if not "Content-Type" in self.headers: - msg = _("Missing Content-Type") - LOG.debug(msg) - raise webob.exc.HTTPBadRequest(msg) - type = self.content_type - if type in allowed_types: - return type - LOG.debug(_("Wrong Content-Type: %s") % type) - raise webob.exc.HTTPBadRequest("Invalid content type") + pass class Application(object): @@ -289,8 +260,8 @@ class Router(object): Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as - well and have your controller be a wsgi.Controller, who will route - the request to the action method. + well and have your controller be an object that can route + the request to the action-specific method. Examples: mapper = routes.Mapper() @@ -338,223 +309,6 @@ class Router(object): return app -class Controller(object): - """WSGI app that dispatched to methods. - - WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method upon itself. All action methods - must, in addition to their normal parameters, accept a 'req' argument - which is the incoming wsgi.Request. They raise a webob.exc exception, - or return a dict which will be serialized by requested content type. 
- - """ - - @webob.dec.wsgify(RequestClass=Request) - def __call__(self, req): - """Call the method specified in req.environ by RoutesMiddleware.""" - arg_dict = req.environ['wsgiorg.routing_args'][1] - action = arg_dict['action'] - method = getattr(self, action) - LOG.debug("%s %s" % (req.method, req.url)) - del arg_dict['controller'] - del arg_dict['action'] - if 'format' in arg_dict: - del arg_dict['format'] - arg_dict['req'] = req - result = method(**arg_dict) - - if type(result) is dict: - content_type = req.best_match_content_type() - default_xmlns = self.get_default_xmlns(req) - body = self._serialize(result, content_type, default_xmlns) - - response = webob.Response() - response.headers['Content-Type'] = content_type - response.body = body - msg_dict = dict(url=req.url, status=response.status_int) - msg = _("%(url)s returned with HTTP %(status)d") % msg_dict - LOG.debug(msg) - return response - else: - return result - - def _serialize(self, data, content_type, default_xmlns): - """Serialize the given dict to the provided content_type. - - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. - - """ - _metadata = getattr(type(self), '_serialization_metadata', {}) - - serializer = Serializer(_metadata, default_xmlns) - try: - return serializer.serialize(data, content_type) - except exception.InvalidContentType: - raise webob.exc.HTTPNotAcceptable() - - def _deserialize(self, data, content_type): - """Deserialize the request body to the specefied content type. - - Uses self._serialization_metadata if it exists, which is a dict mapping - MIME types to information needed to serialize to that type. - - """ - _metadata = getattr(type(self), '_serialization_metadata', {}) - serializer = Serializer(_metadata) - return serializer.deserialize(data, content_type) - - def get_default_xmlns(self, req): - """Provide the XML namespace to use if none is otherwise specified.""" - return None - - -class Serializer(object): - """Serializes and deserializes dictionaries to certain MIME types.""" - - def __init__(self, metadata=None, default_xmlns=None): - """Create a serializer based on the given WSGI environment. - - 'metadata' is an optional dict mapping MIME types to information - needed to serialize a dictionary to that type. - - """ - self.metadata = metadata or {} - self.default_xmlns = default_xmlns - - def _get_serialize_handler(self, content_type): - handlers = { - 'application/json': self._to_json, - 'application/xml': self._to_xml, - } - - try: - return handlers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) - - def serialize(self, data, content_type): - """Serialize a dictionary into the specified content type.""" - return self._get_serialize_handler(content_type)(data) - - def deserialize(self, datastring, content_type): - """Deserialize a string to a dictionary. - - The string must be in the format of a supported MIME type. 
- - """ - return self.get_deserialize_handler(content_type)(datastring) - - def get_deserialize_handler(self, content_type): - handlers = { - 'application/json': self._from_json, - 'application/xml': self._from_xml, - } - - try: - return handlers[content_type] - except Exception: - raise exception.InvalidContentType(content_type=content_type) - - def _from_json(self, datastring): - return utils.loads(datastring) - - def _from_xml(self, datastring): - xmldata = self.metadata.get('application/xml', {}) - plurals = set(xmldata.get('plurals', {})) - node = minidom.parseString(datastring).childNodes[0] - return {node.nodeName: self._from_xml_node(node, plurals)} - - def _from_xml_node(self, node, listnames): - """Convert a minidom node to a simple Python type. - - listnames is a collection of names of XML nodes whose subnodes should - be considered list items. - - """ - if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: - return node.childNodes[0].nodeValue - elif node.nodeName in listnames: - return [self._from_xml_node(n, listnames) for n in node.childNodes] - else: - result = dict() - for attr in node.attributes.keys(): - result[attr] = node.attributes[attr].nodeValue - for child in node.childNodes: - if child.nodeType != node.TEXT_NODE: - result[child.nodeName] = self._from_xml_node(child, - listnames) - return result - - def _to_json(self, data): - return utils.dumps(data) - - def _to_xml(self, data): - metadata = self.metadata.get('application/xml', {}) - # We expect data to contain a single key which is the XML root. - root_key = data.keys()[0] - doc = minidom.Document() - node = self._to_xml_node(doc, metadata, root_key, data[root_key]) - - xmlns = node.getAttribute('xmlns') - if not xmlns and self.default_xmlns: - node.setAttribute('xmlns', self.default_xmlns) - - return node.toprettyxml(indent=' ') - - def _to_xml_node(self, doc, metadata, nodename, data): - """Recursive method to convert data members to XML nodes.""" - result = doc.createElement(nodename) - - # Set the xml namespace if one is specified - # TODO(justinsb): We could also use prefixes on the keys - xmlns = metadata.get('xmlns', None) - if xmlns: - result.setAttribute('xmlns', xmlns) - - if type(data) is list: - collections = metadata.get('list_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for item in data: - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(item)) - result.appendChild(node) - return result - singular = metadata.get('plurals', {}).get(nodename, None) - if singular is None: - if nodename.endswith('s'): - singular = nodename[:-1] - else: - singular = 'item' - for item in data: - node = self._to_xml_node(doc, metadata, singular, item) - result.appendChild(node) - elif type(data) is dict: - collections = metadata.get('dict_collections', {}) - if nodename in collections: - metadata = collections[nodename] - for k, v in data.items(): - node = doc.createElement(metadata['item_name']) - node.setAttribute(metadata['item_key'], str(k)) - text = doc.createTextNode(str(v)) - node.appendChild(text) - result.appendChild(node) - return result - attrs = metadata.get('attributes', {}).get(nodename, {}) - for k, v in data.items(): - if k in attrs: - result.setAttribute(k, str(v)) - else: - node = self._to_xml_node(doc, metadata, k, v) - result.appendChild(node) - else: - # Type is atom - node = doc.createTextNode(str(data)) - result.appendChild(node) - return result - - def paste_config_file(basename): """Find the best 
location in the system for a paste config file.
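
With wsgi.Controller and Serializer removed, a routed application is expected to dispatch and serialize for itself, as the revised Router docstring above says. A minimal sketch of such a controller follows, modelled on the removed __call__ body but trimmed to JSON only; the class name and the index action are illustrative.

# Minimal sketch of a self-dispatching controller; modelled on the removed
# wsgi.Controller.__call__ but limited to JSON. Names are illustrative.
import json

import webob
import webob.dec


class EchoController(object):

    @webob.dec.wsgify
    def __call__(self, req):
        # RoutesMiddleware leaves the matched route here.
        arg_dict = req.environ['wsgiorg.routing_args'][1]
        action = arg_dict.pop('action')
        arg_dict.pop('controller', None)
        arg_dict.pop('format', None)
        arg_dict['req'] = req
        result = getattr(self, action)(**arg_dict)
        if isinstance(result, dict):
            response = webob.Response(content_type='application/json')
            # A bytes body keeps this valid across WebOb versions.
            response.body = json.dumps(result).encode('utf-8')
            return response
        return result

    def index(self, req):
        return {'path': req.path}

Wired into a routes.Mapper as in the Router docstring's examples, RoutesMiddleware supplies the routing_args this controller reads.
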
