From 049b89babe10068d3976f3f3a99b7dce120e2962 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 10 Aug 2010 18:17:44 -0400 Subject: work on a router that works with wsgi and non-wsgi routing --- nova/endpoint/rackspace.py | 27 ++++++++-------- nova/wsgi.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++ tools/pip-requires | 3 ++ 3 files changed, 94 insertions(+), 12 deletions(-) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py index 75b828e91..b4e6cd823 100644 --- a/nova/endpoint/rackspace.py +++ b/nova/endpoint/rackspace.py @@ -45,18 +45,20 @@ class API(wsgi.Middleware): def __init__(self): super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - def __call__(self, environ, start_response): + @webob.dec.wsgify + def __call__(self, req): + return self.application context = {} - if "HTTP_X_AUTH_TOKEN" in environ: + if "HTTP_X_AUTH_TOKEN" in req.environ: context['user'] = manager.AuthManager().get_user_from_access_key( - environ['HTTP_X_AUTH_TOKEN']) + req.environ['HTTP_X_AUTH_TOKEN']) if context['user']: context['project'] = manager.AuthManager().get_project( context['user'].name) if "user" not in context: - return webob.exc.HTTPForbidden()(environ, start_response) + return webob.exc.HTTPForbidden() environ['nova.context'] = context - return self.application(environ, start_response) + return self.application class Router(wsgi.Router): @@ -64,13 +66,14 @@ class Router(wsgi.Router): def _build_map(self): """Build routing map for authentication and cloud.""" - self._connect("/v1.0", controller=AuthenticationAPI()) - cloud = CloudServerAPI() - self._connect("/servers", controller=cloud.launch_server, - conditions={"method": ["POST"]}) - self._connect("/servers/{server_id}", controller=cloud.delete_server, - conditions={'method': ["DELETE"]}) - self._connect("/servers", controller=cloud) + self.map.resource("server", "servers", controller=CloudServerAPI()) + #self._connect("/v1.0", controller=AuthenticationAPI()) + #cloud = CloudServerAPI() + #self._connect("/servers", controller=cloud.launch_server, + # conditions={"method": ["POST"]}) + #self._connect("/servers/{server_id}", controller=cloud.delete_server, + # conditions={'method': ["DELETE"]}) + #self._connect("/servers", controller=cloud) class AuthenticationAPI(wsgi.Application): diff --git a/nova/wsgi.py b/nova/wsgi.py index 4fd6e59e3..271648105 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -40,6 +40,7 @@ def run_server(application, port): eventlet.wsgi.server(sock, application) +# TODO(gundlach): I think we should toss this class, now that it has no purpose. class Application(object): """Base WSGI application wrapper. Subclasses need to implement __call__.""" @@ -140,6 +141,81 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) +class MichaelRouter(object): + """ + My attempt at a routing class. Just override __init__ to call + super, then set up routes in self.map. + """ + + def __init__(self): + self.map = routes.Mapper() + self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + + @webob.dec.wsgify + def __call__(self, req): + """ + Route the incoming request to a controller based on self.map. + If no match, return a 404. + """ + return self._router + + @webob.dec.wsgify + def _proceed(self, req): + """ + Called by self._router after matching the incoming request to a route + and putting the information into req.environ. 
+ """ + if req.environ['routes.route'] is None: + return webob.exc.HTTPNotFound() + match = environ['wsgiorg.routing_args'][1] + if match.get('_is_wsgi', False): + wsgiapp = match['controller'] + return req.get_response(wsgiapp) + else: + # TODO(gundlach): doubt this is the right way -- and it really + # feels like this code should exist somewhere already on the + # internet + controller, action = match['controller'], match['action'] + delete match['controller'] + delete match['action'] + return _as_response(getattr(controller, action)(**match)) + + controller = environ['wsgiorg.routing_args'][1]['controller'] + self._dispatch(controller) + + def _as_response(self, result): + """ + When routing to a non-wsgi controller+action, its result will + be passed here before returning up the WSGI chain to be converted + into a webob.Response + + + + + +class ApiVersionRouter(MichaelRouter): + + def __init__(self): + super(ApiVersionRouter, self).__init__(self) + + self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) + self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + +class RsApiRouter(MichaelRouter): + def __init__(self): + super(RsApiRouter, self).__init__(self) + + self.map.resource("server", "servers", controller=CloudServersServerApi()) + self.map.resource("image", "images", controller=CloudServersImageApi()) + self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) + self.map.resource("sharedipgroup", "sharedipgroups", + controller=CloudServersSharedIpGroupApi()) + +class Ec2ApiRouter(object): + def __getattr__(self, key): + return lambda *x: {'dummy response': 'i am a dummy response'} +CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ + CloudServersSharedIpGroupApi = Ec2ApiRouter class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" diff --git a/tools/pip-requires b/tools/pip-requires index 4eb47ca2b..2317907d1 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -4,11 +4,14 @@ amqplib==0.6.1 anyjson==0.2.4 boto==2.0b1 carrot==0.10.5 +eventlet==0.9.10 lockfile==0.8 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 +routes==1.12.3 tornado==1.0 +webob==0.9.8 wsgiref==0.1.2 zope.interface==3.6.1 mox==0.5.0 -- cgit From 1637c33927672a6edc9ad7a994787669ea47f602 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 09:46:08 -0400 Subject: Serializing in middleware after all... by tying to the router. maybe a good idea? --- nova/wsgi.py | 113 +++++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 72 insertions(+), 41 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 271648105..c511a3f06 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -141,15 +141,24 @@ class ParsedRoutes(Middleware): app = environ['wsgiorg.routing_args'][1]['controller'] return app(environ, start_response) -class MichaelRouter(object): +class MichaelRouterMiddleware(object): """ - My attempt at a routing class. Just override __init__ to call - super, then set up routes in self.map. + Router that maps incoming requests to WSGI apps or to standard + controllers+actions. The response will be a WSGI response; standard + controllers+actions will by default have their results serialized + to the requested Content Type, or you can subclass and override + _to_webob_response to customize this. 
""" - def __init__(self): - self.map = routes.Mapper() - self._router = routes.middleware.RoutesMiddleware(self._proceed, self.map) + def __init__(self, map): + """ + Create a router for the given routes.Mapper. It may contain standard + routes (i.e. specifying controllers and actions), or may route to a + WSGI app by instead specifying a wsgi_app=SomeApp() parameter in + map.connect(). + """ + self.map = map + self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify def __call__(self, req): @@ -160,62 +169,84 @@ class MichaelRouter(object): return self._router @webob.dec.wsgify - def _proceed(self, req): - """ - Called by self._router after matching the incoming request to a route - and putting the information into req.environ. - """ + @staticmethod + def __proceed(req): + # Called by self._router after matching the incoming request to a route + # and putting the information into req.environ. Either returns 404, the + # routed WSGI app, or _to_webob_response(the action result). + if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() match = environ['wsgiorg.routing_args'][1] - if match.get('_is_wsgi', False): - wsgiapp = match['controller'] - return req.get_response(wsgiapp) + if 'wsgi_app' in match: + return match['wsgi_app'] else: - # TODO(gundlach): doubt this is the right way -- and it really - # feels like this code should exist somewhere already on the - # internet + kwargs = match.copy() controller, action = match['controller'], match['action'] - delete match['controller'] - delete match['action'] - return _as_response(getattr(controller, action)(**match)) + delete kwargs['controller'] + delete kwargs['action'] + return _to_webob_response(req, getattr(controller, action)(**kwargs)) - controller = environ['wsgiorg.routing_args'][1]['controller'] - self._dispatch(controller) - - def _as_response(self, result): + def _to_webob_response(self, req, result): + """ + When routing to a non-WSGI controller+action, the webob.Request and the + action's result will be passed here to be converted into a + webob.Response before returning up the WSGI chain. By default it + serializes to the requested Content Type. """ - When routing to a non-wsgi controller+action, its result will - be passed here before returning up the WSGI chain to be converted - into a webob.Response + return Serializer(req).serialize(result) +class Serializer(object): + """ + Serializes a dictionary to a Content Type specified by a WSGI environment. 
+ """ + def __init__(self, environ): + """Create a serializer based on the given WSGI environment.""" + self.environ = environ + def serialize(self, data): + req = webob.Request(environ) + # TODO(gundlach): temp + if 'applicatio/json' in req.accept): + import json + return json.dumps(result) + else: + return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouter): +class ApiVersionRouter(MichaelRouterMiddleware): def __init__(self): - super(ApiVersionRouter, self).__init__(self) + map = routes.Mapper() - self.map.connect(None, "/v1.0/{path_info:.*}", controller=RsApiRouter()) - self.map.connect(None, "/ec2/{path_info:.*}", controller=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) + map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) -class RsApiRouter(MichaelRouter): + super(ApiVersionRouter, self).__init__(self, map) + +class RsApiRouter(MichaelRouterMiddleware): def __init__(self): - super(RsApiRouter, self).__init__(self) + map = routes.Mapper() + + map.resource("server", "servers", controller=ServerController()) + map.resource("image", "images", controller=ImageController()) + map.resource("flavor", "flavors", controller=FlavorController()) + map.resource("sharedipgroup", "sharedipgroups", + controller=SharedIpGroupController()) - self.map.resource("server", "servers", controller=CloudServersServerApi()) - self.map.resource("image", "images", controller=CloudServersImageApi()) - self.map.resource("flavor", "flavors", controller=CloudServersFlavorApi()) - self.map.resource("sharedipgroup", "sharedipgroups", - controller=CloudServersSharedIpGroupApi()) + super(RsApiRouter, self).__init__(self, map) class Ec2ApiRouter(object): + @webob.dec.wsgify + def __call__(self, req): + return 'dummy response' + +class ServerController(object): def __getattr__(self, key): - return lambda *x: {'dummy response': 'i am a dummy response'} -CloudServersServerApi = CloudServersImageApi = CloudServersFlavorApi = \ - CloudServersSharedIpGroupApi = Ec2ApiRouter + return {'dummy': 'dummy response'} +ImageController = FlavorController = SharedIpGroupController = ServerController + class Router(Middleware): # pylint: disable-msg=R0921 """Wrapper to help setup routes.middleware.RoutesMiddleware.""" -- cgit From a0fb0fdf1e899488f0717bea6ee2cad58120070b Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Wed, 11 Aug 2010 14:46:43 -0400 Subject: Working router that can target WSGI middleware or a standard controller+action --- nova/wsgi.py | 205 ++++++++++++++++++++++++++++------------------------------- 1 file changed, 98 insertions(+), 107 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index c511a3f06..81890499e 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -29,6 +29,8 @@ import eventlet.wsgi eventlet.patcher.monkey_patch(all=False, socket=True) import routes import routes.middleware +import webob.dec +import webob.exc logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) @@ -89,75 +91,80 @@ class Middleware(Application): # pylint: disable-msg=W0223 class Debug(Middleware): - """Helper class that can be insertd into any WSGI application chain + """Helper class that can be inserted into any WSGI application chain to get information about the request and response.""" - def __call__(self, environ, start_response): - for key, value in environ.items(): + @webob.dec.wsgify + def __call__(self, req): + print ("*" * 40) + " REQUEST ENVIRON" + for key, value in req.environ.items(): print key, "=", value print - wrapper = 
debug_start_response(start_response) - return debug_print_body(self.application(environ, wrapper)) - - -def debug_start_response(start_response): - """Wrap the start_response to capture when called.""" + resp = req.get_response(self.application) - def wrapper(status, headers, exc_info=None): - """Print out all headers when start_response is called.""" - print status - for (key, value) in headers: + print ("*" * 40) + " RESPONSE HEADERS" + for (key, value) in resp.headers: print key, "=", value print - start_response(status, headers, exc_info) - return wrapper + resp.app_iter = self.print_generator(resp.app_iter) + return resp -def debug_print_body(body): - """Print the body of the response as it is sent back.""" + @staticmethod + def print_generator(app_iter): + """ + Iterator that prints the contents of a wrapper string iterator + when iterated. + """ + print ("*" * 40) + "BODY" + for part in app_iter: + sys.stdout.write(part) + sys.stdout.flush() + yield part + print - class Wrapper(object): - """Iterate through all the body parts and print before returning.""" - def __iter__(self): - for part in body: - sys.stdout.write(part) - sys.stdout.flush() - yield part - print +class Router(object): + """ + WSGI middleware that maps incoming requests to targets. + + Non-WSGI-app targets have their results converted to a WSGI response + automatically -- by default, they are serialized according to the Content + Type from the request. This behavior can be changed by overriding + _to_webob_response(). + """ + + def __init__(self, map, targets): + """ + Create a router for the given routes.Mapper `map`. - return Wrapper() + Each route in `map` must contain either + - a 'wsgi_app' string or + - a 'controller' string and an 'action' string. + 'wsgi_app' is a key into the `target` dictionary whose value + is a WSGI app. 'controller' is a key into `target' whose value is + a class instance containing the method specified by 'action'. -class ParsedRoutes(Middleware): - """Processed parsed routes from routes.middleware.RoutesMiddleware - and call either the controller if found or the default application - otherwise.""" + Examples: + map = routes.Mapper() + targets = { "servers": ServerController(), "blog": BlogWsgiApp() } - def __call__(self, environ, start_response): - if environ['routes.route'] is None: - return self.application(environ, start_response) - app = environ['wsgiorg.routing_args'][1]['controller'] - return app(environ, start_response) + # Explicit mapping of one route to a controller+action + map.connect(None, "/serverlist", controller="servers", action="list") -class MichaelRouterMiddleware(object): - """ - Router that maps incoming requests to WSGI apps or to standard - controllers+actions. The response will be a WSGI response; standard - controllers+actions will by default have their results serialized - to the requested Content Type, or you can subclass and override - _to_webob_response to customize this. - """ - - def __init__(self, map): - """ - Create a router for the given routes.Mapper. It may contain standard - routes (i.e. specifying controllers and actions), or may route to a - WSGI app by instead specifying a wsgi_app=SomeApp() parameter in - map.connect(). + # Controller string is implicitly equal to 2nd param here, and + # actions are all implicitly defined + map.resource("server", "servers") + + # Pointing to a WSGI app. You'll need to specify the {path_info:.*} + # parameter so the target app can work with just his section of the + # URL. 
+ map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="blog") """ self.map = map + self.targets = targets self._router = routes.middleware.RoutesMiddleware(self.__proceed, self.map) @webob.dec.wsgify @@ -169,23 +176,28 @@ class MichaelRouterMiddleware(object): return self._router @webob.dec.wsgify - @staticmethod - def __proceed(req): + def __proceed(self, req): # Called by self._router after matching the incoming request to a route # and putting the information into req.environ. Either returns 404, the # routed WSGI app, or _to_webob_response(the action result). if req.environ['routes.route'] is None: return webob.exc.HTTPNotFound() - match = environ['wsgiorg.routing_args'][1] + match = req.environ['wsgiorg.routing_args'][1] if 'wsgi_app' in match: - return match['wsgi_app'] + app_name = match['wsgi_app'] + app = self.targets[app_name] + return app else: kwargs = match.copy() - controller, action = match['controller'], match['action'] - delete kwargs['controller'] - delete kwargs['action'] - return _to_webob_response(req, getattr(controller, action)(**kwargs)) + controller_name, action = match['controller'], match['action'] + del kwargs['controller'] + del kwargs['action'] + + controller = self.targets[controller_name] + method = getattr(controller, action) + result = method(**kwargs) + return self._to_webob_response(req, result) def _to_webob_response(self, req, result): """ @@ -194,7 +206,8 @@ class MichaelRouterMiddleware(object): webob.Response before returning up the WSGI chain. By default it serializes to the requested Content Type. """ - return Serializer(req).serialize(result) + return Serializer(req.environ).serialize(result) + class Serializer(object): """ @@ -206,75 +219,53 @@ class Serializer(object): self.environ = environ def serialize(self, data): - req = webob.Request(environ) + req = webob.Request(self.environ) # TODO(gundlach): temp - if 'applicatio/json' in req.accept): + if req.accept and 'application/json' in req.accept: import json - return json.dumps(result) + return json.dumps(data) else: return '' + repr(data) + '' -class ApiVersionRouter(MichaelRouterMiddleware): +class ApiVersionRouter(Router): def __init__(self): map = routes.Mapper() - map.connect(None, "/v1.0/{path_info:.*}", wsgi_app=RsApiRouter()) - map.connect(None, "/ec2/{path_info:.*}", wsgi_app=Ec2ApiRouter()) + map.connect(None, "/v1.0/{path_info:.*}", wsgi_app="rs") + map.connect(None, "/ec2/{path_info:.*}", wsgi_app="ec2") + + targets = { "rs": RsApiRouter(), "ec2": Ec2ApiRouter() } - super(ApiVersionRouter, self).__init__(self, map) + super(ApiVersionRouter, self).__init__(map, targets) -class RsApiRouter(MichaelRouterMiddleware): +class RsApiRouter(Router): def __init__(self): map = routes.Mapper() - map.resource("server", "servers", controller=ServerController()) - map.resource("image", "images", controller=ImageController()) - map.resource("flavor", "flavors", controller=FlavorController()) - map.resource("sharedipgroup", "sharedipgroups", - controller=SharedIpGroupController()) + map.resource("server", "servers") + map.resource("image", "images") + map.resource("flavor", "flavors") + map.resource("sharedipgroup", "sharedipgroups") - super(RsApiRouter, self).__init__(self, map) + targets = { + 'servers': ServerController(), + 'images': ImageController(), + 'flavors': FlavorController(), + 'sharedipgroups': SharedIpGroupController() + } + super(RsApiRouter, self).__init__(map, targets) + +# TODO(gundlach): temp class Ec2ApiRouter(object): @webob.dec.wsgify def __call__(self, req): return 'dummy 
response' - +# TODO(gundlach): temp class ServerController(object): def __getattr__(self, key): - return {'dummy': 'dummy response'} + return lambda **args: {key: 'dummy response for %s' % repr(args)} +# TODO(gundlach): temp ImageController = FlavorController = SharedIpGroupController = ServerController - - -class Router(Middleware): # pylint: disable-msg=R0921 - """Wrapper to help setup routes.middleware.RoutesMiddleware.""" - - def __init__(self, application): - self.map = routes.Mapper() - self._build_map() - application = ParsedRoutes(application) - application = routes.middleware.RoutesMiddleware(application, self.map) - super(Router, self).__init__(application) - - def __call__(self, environ, start_response): - return self.application(environ, start_response) - - def _build_map(self): - """Method to create new connections for the routing map.""" - raise NotImplementedError("You must implement _build_map") - - def _connect(self, *args, **kwargs): - """Wrapper for the map.connect method.""" - self.map.connect(*args, **kwargs) - - -def route_args(application): - """Decorator to make grabbing routing args more convenient.""" - - def wrapper(self, req): - """Call application with req and parsed routing args from.""" - return application(self, req, req.environ['wsgiorg.routing_args'][1]) - - return wrapper -- cgit From 2e753b033dae6270674c0397be8e01bd2ff47980 Mon Sep 17 00:00:00 2001 From: Matthew Dietz Date: Wed, 11 Aug 2010 15:27:27 -0500 Subject: Prototype implementation of Servers controller --- nova/endpoint/aws/cloud.py | 729 +++++++++++++++++++++ nova/endpoint/aws/images.py | 95 +++ nova/endpoint/cloud.py | 729 --------------------- nova/endpoint/images.py | 95 --- nova/endpoint/rackspace.py | 186 ------ nova/endpoint/rackspace/controllers/base.py | 9 + nova/endpoint/rackspace/controllers/flavors.py | 0 nova/endpoint/rackspace/controllers/images.py | 0 nova/endpoint/rackspace/controllers/servers.py | 72 ++ .../rackspace/controllers/shared_ip_groups.py | 0 nova/endpoint/rackspace/rackspace.py | 183 ++++++ 11 files changed, 1088 insertions(+), 1010 deletions(-) create mode 100644 nova/endpoint/aws/cloud.py create mode 100644 nova/endpoint/aws/images.py delete mode 100644 nova/endpoint/cloud.py delete mode 100644 nova/endpoint/images.py delete mode 100644 nova/endpoint/rackspace.py create mode 100644 nova/endpoint/rackspace/controllers/base.py create mode 100644 nova/endpoint/rackspace/controllers/flavors.py create mode 100644 nova/endpoint/rackspace/controllers/images.py create mode 100644 nova/endpoint/rackspace/controllers/servers.py create mode 100644 nova/endpoint/rackspace/controllers/shared_ip_groups.py create mode 100644 nova/endpoint/rackspace/rackspace.py diff --git a/nova/endpoint/aws/cloud.py b/nova/endpoint/aws/cloud.py new file mode 100644 index 000000000..878d54a15 --- /dev/null +++ b/nova/endpoint/aws/cloud.py @@ -0,0 +1,729 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cloud Controller: Implementation of EC2 REST API calls, which are +dispatched to other nodes via AMQP RPC. State is via distributed +datastore. +""" + +import base64 +import logging +import os +import time +from twisted.internet import defer + +from nova import datastore +from nova import exception +from nova import flags +from nova import rpc +from nova import utils +from nova.auth import rbac +from nova.auth import manager +from nova.compute import model +from nova.compute.instance_types import INSTANCE_TYPES +from nova.endpoint import images +from nova.network import service as network_service +from nova.network import model as network_model +from nova.volume import service + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + +def _gen_key(user_id, key_name): + """ Tuck this into AuthManager """ + try: + mgr = manager.AuthManager() + private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) + except Exception as ex: + return {'exception': ex} + return {'private_key': private_key, 'fingerprint': fingerprint} + + +class CloudController(object): + """ CloudController provides the critical dispatch between + inbound API calls through the endpoint and messages + sent to the other nodes. +""" + def __init__(self): + self.instdir = model.InstanceDirectory() + self.setup() + + @property + def instances(self): + """ All instances in the system, as dicts """ + return self.instdir.all + + @property + def volumes(self): + """ returns a list of all volumes """ + for volume_id in datastore.Redis.instance().smembers("volumes"): + volume = service.get_volume(volume_id) + yield volume + + def __str__(self): + return 'CloudController' + + def setup(self): + """ Ensure the keychains and folders exist. 
""" + # Create keys folder, if it doesn't exist + if not os.path.exists(FLAGS.keys_path): + os.makedirs(os.path.abspath(FLAGS.keys_path)) + # Gen root CA, if we don't have one + root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) + if not os.path.exists(root_ca_path): + start = os.getcwd() + os.chdir(FLAGS.ca_path) + utils.runthis("Generating root CA: %s", "sh genrootca.sh") + os.chdir(start) + # TODO: Do this with M2Crypto instead + + def get_instance_by_ip(self, ip): + return self.instdir.by_ip(ip) + + def _get_mpi_data(self, project_id): + result = {} + for instance in self.instdir.all: + if instance['project_id'] == project_id: + line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + if instance['key_name'] in result: + result[instance['key_name']].append(line) + else: + result[instance['key_name']] = [line] + return result + + def get_metadata(self, ip): + i = self.get_instance_by_ip(ip) + if i is None: + return None + mpi = self._get_mpi_data(i['project_id']) + if i['key_name']: + keys = { + '0': { + '_name': i['key_name'], + 'openssh-key': i['key_data'] + } + } + else: + keys = '' + data = { + 'user-data': base64.b64decode(i['user_data']), + 'meta-data': { + 'ami-id': i['image_id'], + 'ami-launch-index': i['ami_launch_index'], + 'ami-manifest-path': 'FIXME', # image property + 'block-device-mapping': { # TODO: replace with real data + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3' + }, + 'hostname': i['private_dns_name'], # is this public sometimes? + 'instance-action': 'none', + 'instance-id': i['instance_id'], + 'instance-type': i.get('instance_type', ''), + 'local-hostname': i['private_dns_name'], + 'local-ipv4': i['private_dns_name'], # TODO: switch to IP + 'kernel-id': i.get('kernel_id', ''), + 'placement': { + 'availaibility-zone': i.get('availability_zone', 'nova'), + }, + 'public-hostname': i.get('dns_name', ''), + 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP + 'public-keys' : keys, + 'ramdisk-id': i.get('ramdisk_id', ''), + 'reservation-id': i['reservation_id'], + 'security-groups': i.get('groups', ''), + 'mpi': mpi + } + } + if False: # TODO: store ancestor ids + data['ancestor-ami-ids'] = [] + if i.get('product_codes', None): + data['product-codes'] = i['product_codes'] + return data + + @rbac.allow('all') + def describe_availability_zones(self, context, **kwargs): + return {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + @rbac.allow('all') + def describe_regions(self, context, region_name=None, **kwargs): + # TODO(vish): region_name is an array. 
Support filtering + return {'regionInfo': [{'regionName': 'nova', + 'regionUrl': FLAGS.ec2_url}]} + + @rbac.allow('all') + def describe_snapshots(self, + context, + snapshot_id=None, + owner=None, + restorable_by=None, + **kwargs): + return {'snapshotSet': [{'snapshotId': 'fixme', + 'volumeId': 'fixme', + 'status': 'fixme', + 'startTime': 'fixme', + 'progress': 'fixme', + 'ownerId': 'fixme', + 'volumeSize': 0, + 'description': 'fixme'}]} + + @rbac.allow('all') + def describe_key_pairs(self, context, key_name=None, **kwargs): + key_pairs = context.user.get_key_pairs() + if not key_name is None: + key_pairs = [x for x in key_pairs if x.name in key_name] + + result = [] + for key_pair in key_pairs: + # filter out the vpn keys + suffix = FLAGS.vpn_key_suffix + if context.user.is_admin() or not key_pair.name.endswith(suffix): + result.append({ + 'keyName': key_pair.name, + 'keyFingerprint': key_pair.fingerprint, + }) + + return { 'keypairsSet': result } + + @rbac.allow('all') + def create_key_pair(self, context, key_name, **kwargs): + try: + d = defer.Deferred() + p = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + d.errback(kwargs['exception']) + return + d.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + p.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return d + + except manager.UserError as e: + raise + + @rbac.allow('all') + def delete_key_pair(self, context, key_name, **kwargs): + context.user.delete_key_pair(key_name) + # aws returns true even if the key doens't exist + return True + + @rbac.allow('all') + def describe_security_groups(self, context, group_names, **kwargs): + groups = { 'securityGroupSet': [] } + + # Stubbed for now to unblock other things. 
+ return groups + + @rbac.allow('netadmin') + def create_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('netadmin') + def delete_security_group(self, context, group_name, **kwargs): + return True + + @rbac.allow('projectmanager', 'sysadmin') + def get_console_output(self, context, instance_id, **kwargs): + # instance_id is passed in as a list of instances + instance = self._get_instance(context, instance_id[0]) + return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "get_console_output", + "args" : {"instance_id": instance_id[0]}}) + + def _get_user_id(self, context): + if context and context.user: + return context.user.id + else: + return None + + @rbac.allow('projectmanager', 'sysadmin') + def describe_volumes(self, context, **kwargs): + volumes = [] + for volume in self.volumes: + if context.user.is_admin() or volume['project_id'] == context.project.id: + v = self.format_volume(context, volume) + volumes.append(v) + return defer.succeed({'volumeSet': volumes}) + + def format_volume(self, context, volume): + v = {} + v['volumeId'] = volume['volume_id'] + v['status'] = volume['status'] + v['size'] = volume['size'] + v['availabilityZone'] = volume['availability_zone'] + v['createTime'] = volume['create_time'] + if context.user.is_admin(): + v['status'] = '%s (%s, %s, %s, %s)' % ( + volume.get('status', None), + volume.get('user_id', None), + volume.get('node_name', None), + volume.get('instance_id', ''), + volume.get('mountpoint', '')) + if volume['attach_status'] == 'attached': + v['attachmentSet'] = [{'attachTime': volume['attach_time'], + 'deleteOnTermination': volume['delete_on_termination'], + 'device' : volume['mountpoint'], + 'instanceId' : volume['instance_id'], + 'status' : 'attached', + 'volume_id' : volume['volume_id']}] + else: + v['attachmentSet'] = [{}] + return v + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def create_volume(self, context, size, **kwargs): + # TODO(vish): refactor this to create the volume object here and tell service to create it + result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", + "args" : {"size": size, + "user_id": context.user.id, + "project_id": context.project.id}}) + # NOTE(vish): rpc returned value is in the result key in the dictionary + volume = self._get_volume(context, result['result']) + defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) + + def _get_address(self, context, public_ip): + # FIXME(vish) this should move into network.py + address = network_model.PublicAddress.lookup(public_ip) + if address and (context.user.is_admin() or address['project_id'] == context.project.id): + return address + raise exception.NotFound("Address at ip %s not found" % public_ip) + + def _get_image(self, context, image_id): + """passes in context because + objectstore does its own authorization""" + result = images.list(context, [image_id]) + if not result: + raise exception.NotFound('Image %s could not be found' % image_id) + image = result[0] + return image + + def _get_instance(self, context, instance_id): + for instance in self.instdir.all: + if instance['instance_id'] == instance_id: + if context.user.is_admin() or instance['project_id'] == context.project.id: + return instance + raise exception.NotFound('Instance %s could not be found' % instance_id) + + def _get_volume(self, context, volume_id): + volume = service.get_volume(volume_id) + if context.user.is_admin() or volume['project_id'] == context.project.id: + 
return volume + raise exception.NotFound('Volume %s could not be found' % volume_id) + + @rbac.allow('projectmanager', 'sysadmin') + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): + volume = self._get_volume(context, volume_id) + if volume['status'] == "attached": + raise exception.ApiError("Volume is already attached") + # TODO(vish): looping through all volumes is slow. We should probably maintain an index + for vol in self.volumes: + if vol['instance_id'] == instance_id and vol['mountpoint'] == device: + raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) + volume.start_attach(instance_id, device) + instance = self._get_instance(context, instance_id) + compute_node = instance['node_name'] + rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), + {"method": "attach_volume", + "args" : {"volume_id": volume_id, + "instance_id" : instance_id, + "mountpoint" : device}}) + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + + @rbac.allow('projectmanager', 'sysadmin') + def detach_volume(self, context, volume_id, **kwargs): + volume = self._get_volume(context, volume_id) + instance_id = volume.get('instance_id', None) + if not instance_id: + raise exception.Error("Volume isn't attached to anything!") + if volume['status'] == "available": + raise exception.Error("Volume is already detached") + try: + volume.start_detach() + instance = self._get_instance(context, instance_id) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "detach_volume", + "args" : {"instance_id": instance_id, + "volume_id": volume_id}}) + except exception.NotFound: + # If the instance doesn't exist anymore, + # then we need to call detach blind + volume.finish_detach() + return defer.succeed({'attachTime' : volume['attach_time'], + 'device' : volume['mountpoint'], + 'instanceId' : instance_id, + 'requestId' : context.request_id, + 'status' : volume['attach_status'], + 'volumeId' : volume_id}) + + def _convert_to_set(self, lst, label): + if lst == None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + @rbac.allow('all') + def describe_instances(self, context, **kwargs): + return defer.succeed(self._format_instances(context)) + + def _format_instances(self, context, reservation_id = None): + reservations = {} + if context.user.is_admin(): + instgenerator = self.instdir.all + else: + instgenerator = self.instdir.by_project(context.project.id) + for instance in instgenerator: + res_id = instance.get('reservation_id', 'Unknown') + if reservation_id != None and reservation_id != res_id: + continue + if not context.user.is_admin(): + if instance['image_id'] == FLAGS.vpn_image_id: + continue + i = {} + i['instance_id'] = instance.get('instance_id', None) + i['image_id'] = instance.get('image_id', None) + i['instance_state'] = { + 'code': instance.get('state', 0), + 'name': instance.get('state_description', 'pending') + } + i['public_dns_name'] = network_model.get_public_ip_for_instance( + i['instance_id']) + i['private_dns_name'] = instance.get('private_dns_name', None) + if not i['public_dns_name']: + i['public_dns_name'] = i['private_dns_name'] + i['dns_name'] = instance.get('dns_name', None) + i['key_name'] = instance.get('key_name', None) + if context.user.is_admin(): + 
i['key_name'] = '%s (%s, %s)' % (i['key_name'], + instance.get('project_id', None), instance.get('node_name','')) + i['product_codes_set'] = self._convert_to_set( + instance.get('product_codes', None), 'product_code') + i['instance_type'] = instance.get('instance_type', None) + i['launch_time'] = instance.get('launch_time', None) + i['ami_launch_index'] = instance.get('ami_launch_index', + None) + if not reservations.has_key(res_id): + r = {} + r['reservation_id'] = res_id + r['owner_id'] = instance.get('project_id', None) + r['group_set'] = self._convert_to_set( + instance.get('groups', None), 'group_id') + r['instances_set'] = [] + reservations[res_id] = r + reservations[res_id]['instances_set'].append(i) + + instance_response = {'reservationSet' : list(reservations.values()) } + return instance_response + + @rbac.allow('all') + def describe_addresses(self, context, **kwargs): + return self.format_addresses(context) + + def format_addresses(self, context): + addresses = [] + for address in network_model.PublicAddress.all(): + # TODO(vish): implement a by_project iterator for addresses + if (context.user.is_admin() or + address['project_id'] == context.project.id): + address_rv = { + 'public_ip': address['address'], + 'instance_id' : address.get('instance_id', 'free') + } + if context.user.is_admin(): + address_rv['instance_id'] = "%s (%s, %s)" % ( + address['instance_id'], + address['user_id'], + address['project_id'], + ) + addresses.append(address_rv) + return {'addressesSet': addresses} + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def allocate_address(self, context, **kwargs): + network_topic = yield self._get_network_topic(context) + alloc_result = yield rpc.call(network_topic, + {"method": "allocate_elastic_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + public_ip = alloc_result['result'] + defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def release_address(self, context, public_ip, **kwargs): + # NOTE(vish): Should we make sure this works? 
+ network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "deallocate_elastic_ip", + "args": {"elastic_ip": public_ip}}) + defer.returnValue({'releaseResponse': ["Address released."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def associate_address(self, context, instance_id, public_ip, **kwargs): + instance = self._get_instance(context, instance_id) + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "associate_elastic_ip", + "args": {"elastic_ip": address['address'], + "fixed_ip": instance['private_dns_name'], + "instance_id": instance['instance_id']}}) + defer.returnValue({'associateResponse': ["Address associated."]}) + + @rbac.allow('netadmin') + @defer.inlineCallbacks + def disassociate_address(self, context, public_ip, **kwargs): + address = self._get_address(context, public_ip) + network_topic = yield self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": address['address']}}) + defer.returnValue({'disassociateResponse': ["Address disassociated."]}) + + @defer.inlineCallbacks + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + host = network_service.get_host_for_project(context.project.id) + if not host: + result = yield rpc.call(FLAGS.network_topic, + {"method": "set_network_host", + "args": {"user_id": context.user.id, + "project_id": context.project.id}}) + host = result['result'] + defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def run_instances(self, context, **kwargs): + # make sure user can access the image + # vpn image is private so it doesn't show up on lists + if kwargs['image_id'] != FLAGS.vpn_image_id: + image = self._get_image(context, kwargs['image_id']) + + # FIXME(ja): if image is cloudpipe, this breaks + + # get defaults from imagestore + image_id = image['imageId'] + kernel_id = image.get('kernelId', FLAGS.default_kernel) + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # API parameters overrides of defaults + kernel_id = kwargs.get('kernel_id', kernel_id) + ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) + + # make sure we have access to kernel and ramdisk + self._get_image(context, kernel_id) + self._get_image(context, ramdisk_id) + + logging.debug("Going to run instances...") + reservation_id = utils.generate_uid('r') + launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + key_data = None + if kwargs.has_key('key_name'): + key_pair = context.user.get_key_pair(kwargs['key_name']) + if not key_pair: + raise exception.ApiError('Key Pair %s not found' % + kwargs['key_name']) + key_data = key_pair.public_key + network_topic = yield self._get_network_topic(context) + # TODO: Get the real security group of launch in here + security_group = "default" + for num in range(int(kwargs['max_count'])): + vpn = False + if image_id == FLAGS.vpn_image_id: + vpn = True + allocate_result = yield rpc.call(network_topic, + {"method": "allocate_fixed_ip", + "args": {"user_id": context.user.id, + "project_id": context.project.id, + "security_group": security_group, + "vpn": vpn}}) + allocate_data = allocate_result['result'] + inst = self.instdir.new() + inst['image_id'] = image_id + inst['kernel_id'] = kernel_id + inst['ramdisk_id'] = ramdisk_id + inst['user_data'] = kwargs.get('user_data', '') + inst['instance_type'] = 
kwargs.get('instance_type', 'm1.small') + inst['reservation_id'] = reservation_id + inst['launch_time'] = launch_time + inst['key_data'] = key_data or '' + inst['key_name'] = kwargs.get('key_name', '') + inst['user_id'] = context.user.id + inst['project_id'] = context.project.id + inst['ami_launch_index'] = num + inst['security_group'] = security_group + for (key, value) in allocate_data.iteritems(): + inst[key] = value + + inst.save() + rpc.cast(FLAGS.compute_topic, + {"method": "run_instance", + "args": {"instance_id" : inst.instance_id}}) + logging.debug("Casting to node for %s's instance with IP of %s" % + (context.user.name, inst['private_dns_name'])) + # TODO: Make Network figure out the network name from ip. + defer.returnValue(self._format_instances(context, reservation_id)) + + @rbac.allow('projectmanager', 'sysadmin') + @defer.inlineCallbacks + def terminate_instances(self, context, instance_id, **kwargs): + logging.debug("Going to start terminating instances") + network_topic = yield self._get_network_topic(context) + for i in instance_id: + logging.debug("Going to try and terminate %s" % i) + try: + instance = self._get_instance(context, i) + except exception.NotFound: + logging.warning("Instance %s was not found during terminate" + % i) + continue + elastic_ip = network_model.get_public_ip_for_instance(i) + if elastic_ip: + logging.debug("Disassociating address %s" % elastic_ip) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(network_topic, + {"method": "disassociate_elastic_ip", + "args": {"elastic_ip": elastic_ip}}) + + fixed_ip = instance.get('private_dns_name', None) + if fixed_ip: + logging.debug("Deallocating address %s" % fixed_ip) + # NOTE(vish): Right now we don't really care if the ip is + # actually removed. We may need to worry about + # checking this later. Perhaps in the scheduler? + rpc.cast(network_topic, + {"method": "deallocate_fixed_ip", + "args": {"fixed_ip": fixed_ip}}) + + if instance.get('node_name', 'unassigned') != 'unassigned': + # NOTE(joshua?): It's also internal default + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "terminate_instance", + "args": {"instance_id": i}}) + else: + instance.destroy() + defer.returnValue(True) + + @rbac.allow('projectmanager', 'sysadmin') + def reboot_instances(self, context, instance_id, **kwargs): + """instance_id is a list of instance ids""" + for i in instance_id: + instance = self._get_instance(context, i) + rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), + {"method": "reboot_instance", + "args" : {"instance_id": i}}) + return defer.succeed(True) + + @rbac.allow('projectmanager', 'sysadmin') + def delete_volume(self, context, volume_id, **kwargs): + # TODO: return error if not authorized + volume = self._get_volume(context, volume_id) + volume_node = volume['node_name'] + rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), + {"method": "delete_volume", + "args" : {"volume_id": volume_id}}) + return defer.succeed(True) + + @rbac.allow('all') + def describe_images(self, context, image_id=None, **kwargs): + # The objectstore does its own authorization for describe + imageSet = images.list(context, image_id) + return defer.succeed({'imagesSet': imageSet}) + + @rbac.allow('projectmanager', 'sysadmin') + def deregister_image(self, context, image_id, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? 
+ images.deregister(context, image_id) + return defer.succeed({'imageId': image_id}) + + @rbac.allow('projectmanager', 'sysadmin') + def register_image(self, context, image_location=None, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? + if image_location is None and kwargs.has_key('name'): + image_location = kwargs['name'] + image_id = images.register(context, image_location) + logging.debug("Registered %s as %s" % (image_location, image_id)) + + return defer.succeed({'imageId': image_id}) + + @rbac.allow('all') + def describe_image_attribute(self, context, image_id, attribute, **kwargs): + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + try: + image = images.list(context, image_id)[0] + except IndexError: + raise exception.ApiError('invalid id: %s' % image_id) + result = { 'image_id': image_id, 'launchPermission': [] } + if image['isPublic']: + result['launchPermission'].append({ 'group': 'all' }) + return defer.succeed(result) + + @rbac.allow('projectmanager', 'sysadmin') + def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): + # TODO(devcamcar): Support users and groups other than 'all'. + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + if not 'user_group' in kwargs: + raise exception.ApiError('user or group not specified') + if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': + raise exception.ApiError('only group "all" is supported') + if not operation_type in ['add', 'remove']: + raise exception.ApiError('operation_type must be add or remove') + result = images.modify(context, image_id, operation_type) + return defer.succeed(result) + + def update_state(self, topic, value): + """ accepts status reports from the queue and consolidates them """ + # TODO(jmc): if an instance has disappeared from + # the node, call instance_death + if topic == "instances": + return defer.succeed(True) + aggregate_state = getattr(self, topic) + node_name = value.keys()[0] + items = value[node_name] + + logging.debug("Updating %s state for %s" % (topic, node_name)) + + for item_id in items.keys(): + if (aggregate_state.has_key('pending') and + aggregate_state['pending'].has_key(item_id)): + del aggregate_state['pending'][item_id] + aggregate_state[node_name] = items + + return defer.succeed(True) diff --git a/nova/endpoint/aws/images.py b/nova/endpoint/aws/images.py new file mode 100644 index 000000000..fe7cb5d11 --- /dev/null +++ b/nova/endpoint/aws/images.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Proxy AMI-related calls from the cloud controller, to the running +objectstore daemon. 
+""" + +import boto.s3.connection +import json +import urllib + +from nova import flags +from nova import utils +from nova.auth import manager + + +FLAGS = flags.FLAGS + +def modify(context, image_id, operation): + conn(context).make_request( + method='POST', + bucket='_images', + query_args=qs({'image_id': image_id, 'operation': operation})) + + return True + + +def register(context, image_location): + """ rpc call to register a new image based from a manifest """ + + image_id = utils.generate_uid('ami') + conn(context).make_request( + method='PUT', + bucket='_images', + query_args=qs({'image_location': image_location, + 'image_id': image_id})) + + return image_id + +def list(context, filter_list=[]): + """ return a list of all images that a user can see + + optionally filtered by a list of image_id """ + + # FIXME: send along the list of only_images to check for + response = conn(context).make_request( + method='GET', + bucket='_images') + + result = json.loads(response.read()) + if not filter_list is None: + return [i for i in result if i['imageId'] in filter_list] + return result + +def deregister(context, image_id): + """ unregister an image """ + conn(context).make_request( + method='DELETE', + bucket='_images', + query_args=qs({'image_id': image_id})) + +def conn(context): + access = manager.AuthManager().get_access_key(context.user, + context.project) + secret = str(context.user.secret) + calling = boto.s3.connection.OrdinaryCallingFormat() + return boto.s3.connection.S3Connection(aws_access_key_id=access, + aws_secret_access_key=secret, + is_secure=False, + calling_format=calling, + port=FLAGS.s3_port, + host=FLAGS.s3_host) + + +def qs(params): + pairs = [] + for key in params.keys(): + pairs.append(key + '=' + urllib.quote(params[key])) + return '&'.join(pairs) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py deleted file mode 100644 index 878d54a15..000000000 --- a/nova/endpoint/cloud.py +++ /dev/null @@ -1,729 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Cloud Controller: Implementation of EC2 REST API calls, which are -dispatched to other nodes via AMQP RPC. State is via distributed -datastore. 
-""" - -import base64 -import logging -import os -import time -from twisted.internet import defer - -from nova import datastore -from nova import exception -from nova import flags -from nova import rpc -from nova import utils -from nova.auth import rbac -from nova.auth import manager -from nova.compute import model -from nova.compute.instance_types import INSTANCE_TYPES -from nova.endpoint import images -from nova.network import service as network_service -from nova.network import model as network_model -from nova.volume import service - - -FLAGS = flags.FLAGS - -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - -def _gen_key(user_id, key_name): - """ Tuck this into AuthManager """ - try: - mgr = manager.AuthManager() - private_key, fingerprint = mgr.generate_key_pair(user_id, key_name) - except Exception as ex: - return {'exception': ex} - return {'private_key': private_key, 'fingerprint': fingerprint} - - -class CloudController(object): - """ CloudController provides the critical dispatch between - inbound API calls through the endpoint and messages - sent to the other nodes. -""" - def __init__(self): - self.instdir = model.InstanceDirectory() - self.setup() - - @property - def instances(self): - """ All instances in the system, as dicts """ - return self.instdir.all - - @property - def volumes(self): - """ returns a list of all volumes """ - for volume_id in datastore.Redis.instance().smembers("volumes"): - volume = service.get_volume(volume_id) - yield volume - - def __str__(self): - return 'CloudController' - - def setup(self): - """ Ensure the keychains and folders exist. """ - # Create keys folder, if it doesn't exist - if not os.path.exists(FLAGS.keys_path): - os.makedirs(os.path.abspath(FLAGS.keys_path)) - # Gen root CA, if we don't have one - root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) - if not os.path.exists(root_ca_path): - start = os.getcwd() - os.chdir(FLAGS.ca_path) - utils.runthis("Generating root CA: %s", "sh genrootca.sh") - os.chdir(start) - # TODO: Do this with M2Crypto instead - - def get_instance_by_ip(self, ip): - return self.instdir.by_ip(ip) - - def _get_mpi_data(self, project_id): - result = {} - for instance in self.instdir.all: - if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) - if instance['key_name'] in result: - result[instance['key_name']].append(line) - else: - result[instance['key_name']] = [line] - return result - - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) - if i is None: - return None - mpi = self._get_mpi_data(i['project_id']) - if i['key_name']: - keys = { - '0': { - '_name': i['key_name'], - 'openssh-key': i['key_data'] - } - } - else: - keys = '' - data = { - 'user-data': base64.b64decode(i['user_data']), - 'meta-data': { - 'ami-id': i['image_id'], - 'ami-launch-index': i['ami_launch_index'], - 'ami-manifest-path': 'FIXME', # image property - 'block-device-mapping': { # TODO: replace with real data - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': '/dev/sda1', - 'swap': 'sda3' - }, - 'hostname': i['private_dns_name'], # is this public sometimes? 
- 'instance-action': 'none', - 'instance-id': i['instance_id'], - 'instance-type': i.get('instance_type', ''), - 'local-hostname': i['private_dns_name'], - 'local-ipv4': i['private_dns_name'], # TODO: switch to IP - 'kernel-id': i.get('kernel_id', ''), - 'placement': { - 'availaibility-zone': i.get('availability_zone', 'nova'), - }, - 'public-hostname': i.get('dns_name', ''), - 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, - 'ramdisk-id': i.get('ramdisk_id', ''), - 'reservation-id': i['reservation_id'], - 'security-groups': i.get('groups', ''), - 'mpi': mpi - } - } - if False: # TODO: store ancestor ids - data['ancestor-ami-ids'] = [] - if i.get('product_codes', None): - data['product-codes'] = i['product_codes'] - return data - - @rbac.allow('all') - def describe_availability_zones(self, context, **kwargs): - return {'availabilityZoneInfo': [{'zoneName': 'nova', - 'zoneState': 'available'}]} - - @rbac.allow('all') - def describe_regions(self, context, region_name=None, **kwargs): - # TODO(vish): region_name is an array. Support filtering - return {'regionInfo': [{'regionName': 'nova', - 'regionUrl': FLAGS.ec2_url}]} - - @rbac.allow('all') - def describe_snapshots(self, - context, - snapshot_id=None, - owner=None, - restorable_by=None, - **kwargs): - return {'snapshotSet': [{'snapshotId': 'fixme', - 'volumeId': 'fixme', - 'status': 'fixme', - 'startTime': 'fixme', - 'progress': 'fixme', - 'ownerId': 'fixme', - 'volumeSize': 0, - 'description': 'fixme'}]} - - @rbac.allow('all') - def describe_key_pairs(self, context, key_name=None, **kwargs): - key_pairs = context.user.get_key_pairs() - if not key_name is None: - key_pairs = [x for x in key_pairs if x.name in key_name] - - result = [] - for key_pair in key_pairs: - # filter out the vpn keys - suffix = FLAGS.vpn_key_suffix - if context.user.is_admin() or not key_pair.name.endswith(suffix): - result.append({ - 'keyName': key_pair.name, - 'keyFingerprint': key_pair.fingerprint, - }) - - return { 'keypairsSet': result } - - @rbac.allow('all') - def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise - - @rbac.allow('all') - def delete_key_pair(self, context, key_name, **kwargs): - context.user.delete_key_pair(key_name) - # aws returns true even if the key doens't exist - return True - - @rbac.allow('all') - def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } - - # Stubbed for now to unblock other things. 
- return groups - - @rbac.allow('netadmin') - def create_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('netadmin') - def delete_security_group(self, context, group_name, **kwargs): - return True - - @rbac.allow('projectmanager', 'sysadmin') - def get_console_output(self, context, instance_id, **kwargs): - # instance_id is passed in as a list of instances - instance = self._get_instance(context, instance_id[0]) - return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) - - def _get_user_id(self, context): - if context and context.user: - return context.user.id - else: - return None - - @rbac.allow('projectmanager', 'sysadmin') - def describe_volumes(self, context, **kwargs): - volumes = [] - for volume in self.volumes: - if context.user.is_admin() or volume['project_id'] == context.project.id: - v = self.format_volume(context, volume) - volumes.append(v) - return defer.succeed({'volumeSet': volumes}) - - def format_volume(self, context, volume): - v = {} - v['volumeId'] = volume['volume_id'] - v['status'] = volume['status'] - v['size'] = volume['size'] - v['availabilityZone'] = volume['availability_zone'] - v['createTime'] = volume['create_time'] - if context.user.is_admin(): - v['status'] = '%s (%s, %s, %s, %s)' % ( - volume.get('status', None), - volume.get('user_id', None), - volume.get('node_name', None), - volume.get('instance_id', ''), - volume.get('mountpoint', '')) - if volume['attach_status'] == 'attached': - v['attachmentSet'] = [{'attachTime': volume['attach_time'], - 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] - else: - v['attachmentSet'] = [{}] - return v - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def create_volume(self, context, size, **kwargs): - # TODO(vish): refactor this to create the volume object here and tell service to create it - result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, - "user_id": context.user.id, - "project_id": context.project.id}}) - # NOTE(vish): rpc returned value is in the result key in the dictionary - volume = self._get_volume(context, result['result']) - defer.returnValue({'volumeSet': [self.format_volume(context, volume)]}) - - def _get_address(self, context, public_ip): - # FIXME(vish) this should move into network.py - address = network_model.PublicAddress.lookup(public_ip) - if address and (context.user.is_admin() or address['project_id'] == context.project.id): - return address - raise exception.NotFound("Address at ip %s not found" % public_ip) - - def _get_image(self, context, image_id): - """passes in context because - objectstore does its own authorization""" - result = images.list(context, [image_id]) - if not result: - raise exception.NotFound('Image %s could not be found' % image_id) - image = result[0] - return image - - def _get_instance(self, context, instance_id): - for instance in self.instdir.all: - if instance['instance_id'] == instance_id: - if context.user.is_admin() or instance['project_id'] == context.project.id: - return instance - raise exception.NotFound('Instance %s could not be found' % instance_id) - - def _get_volume(self, context, volume_id): - volume = service.get_volume(volume_id) - if context.user.is_admin() or volume['project_id'] == context.project.id: - 
return volume - raise exception.NotFound('Volume %s could not be found' % volume_id) - - @rbac.allow('projectmanager', 'sysadmin') - def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - volume = self._get_volume(context, volume_id) - if volume['status'] == "attached": - raise exception.ApiError("Volume is already attached") - # TODO(vish): looping through all volumes is slow. We should probably maintain an index - for vol in self.volumes: - if vol['instance_id'] == instance_id and vol['mountpoint'] == device: - raise exception.ApiError("Volume %s is already attached to %s" % (vol['volume_id'], vol['mountpoint'])) - volume.start_attach(instance_id, device) - instance = self._get_instance(context, instance_id) - compute_node = instance['node_name'] - rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), - {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - - @rbac.allow('projectmanager', 'sysadmin') - def detach_volume(self, context, volume_id, **kwargs): - volume = self._get_volume(context, volume_id) - instance_id = volume.get('instance_id', None) - if not instance_id: - raise exception.Error("Volume isn't attached to anything!") - if volume['status'] == "available": - raise exception.Error("Volume is already detached") - try: - volume.start_detach() - instance = self._get_instance(context, instance_id) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "detach_volume", - "args" : {"instance_id": instance_id, - "volume_id": volume_id}}) - except exception.NotFound: - # If the instance doesn't exist anymore, - # then we need to call detach blind - volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) - - def _convert_to_set(self, lst, label): - if lst == None or lst == []: - return None - if not isinstance(lst, list): - lst = [lst] - return [{label: x} for x in lst] - - @rbac.allow('all') - def describe_instances(self, context, **kwargs): - return defer.succeed(self._format_instances(context)) - - def _format_instances(self, context, reservation_id = None): - reservations = {} - if context.user.is_admin(): - instgenerator = self.instdir.all - else: - instgenerator = self.instdir.by_project(context.project.id) - for instance in instgenerator: - res_id = instance.get('reservation_id', 'Unknown') - if reservation_id != None and reservation_id != res_id: - continue - if not context.user.is_admin(): - if instance['image_id'] == FLAGS.vpn_image_id: - continue - i = {} - i['instance_id'] = instance.get('instance_id', None) - i['image_id'] = instance.get('image_id', None) - i['instance_state'] = { - 'code': instance.get('state', 0), - 'name': instance.get('state_description', 'pending') - } - i['public_dns_name'] = network_model.get_public_ip_for_instance( - i['instance_id']) - i['private_dns_name'] = instance.get('private_dns_name', None) - if not i['public_dns_name']: - i['public_dns_name'] = i['private_dns_name'] - i['dns_name'] = instance.get('dns_name', None) - i['key_name'] = instance.get('key_name', None) - if context.user.is_admin(): - 
i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) - i['product_codes_set'] = self._convert_to_set( - instance.get('product_codes', None), 'product_code') - i['instance_type'] = instance.get('instance_type', None) - i['launch_time'] = instance.get('launch_time', None) - i['ami_launch_index'] = instance.get('ami_launch_index', - None) - if not reservations.has_key(res_id): - r = {} - r['reservation_id'] = res_id - r['owner_id'] = instance.get('project_id', None) - r['group_set'] = self._convert_to_set( - instance.get('groups', None), 'group_id') - r['instances_set'] = [] - reservations[res_id] = r - reservations[res_id]['instances_set'].append(i) - - instance_response = {'reservationSet' : list(reservations.values()) } - return instance_response - - @rbac.allow('all') - def describe_addresses(self, context, **kwargs): - return self.format_addresses(context) - - def format_addresses(self, context): - addresses = [] - for address in network_model.PublicAddress.all(): - # TODO(vish): implement a by_project iterator for addresses - if (context.user.is_admin() or - address['project_id'] == context.project.id): - address_rv = { - 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') - } - if context.user.is_admin(): - address_rv['instance_id'] = "%s (%s, %s)" % ( - address['instance_id'], - address['user_id'], - address['project_id'], - ) - addresses.append(address_rv) - return {'addressesSet': addresses} - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def allocate_address(self, context, **kwargs): - network_topic = yield self._get_network_topic(context) - alloc_result = yield rpc.call(network_topic, - {"method": "allocate_elastic_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def release_address(self, context, public_ip, **kwargs): - # NOTE(vish): Should we make sure this works? 
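The RPC pattern used throughout this controller addresses a service by topic, optionally pinned to a specific host, and exchanges plain dicts naming a remote method and its keyword arguments; rpc.call() waits for an answer (delivered under a 'result' key, as noted above), while rpc.cast() is fire-and-forget. A hedged illustration with made-up values, as it would appear inside one of the @defer.inlineCallbacks methods:

    topic = '%s.%s' % (FLAGS.network_topic, 'node-01')   # e.g. "network.node-01"
    reply = yield rpc.call(topic,
                           {"method": "allocate_elastic_ip",
                            "args": {"user_id": context.user.id,
                                     "project_id": context.project.id}})
    public_ip = reply['result']
    rpc.cast(topic, {"method": "deallocate_elastic_ip",
                     "args": {"elastic_ip": public_ip}})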
- network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "deallocate_elastic_ip", - "args": {"elastic_ip": public_ip}}) - defer.returnValue({'releaseResponse': ["Address released."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def associate_address(self, context, instance_id, public_ip, **kwargs): - instance = self._get_instance(context, instance_id) - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "associate_elastic_ip", - "args": {"elastic_ip": address['address'], - "fixed_ip": instance['private_dns_name'], - "instance_id": instance['instance_id']}}) - defer.returnValue({'associateResponse': ["Address associated."]}) - - @rbac.allow('netadmin') - @defer.inlineCallbacks - def disassociate_address(self, context, public_ip, **kwargs): - address = self._get_address(context, public_ip) - network_topic = yield self._get_network_topic(context) - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": address['address']}}) - defer.returnValue({'disassociateResponse': ["Address disassociated."]}) - - @defer.inlineCallbacks - def _get_network_topic(self, context): - """Retrieves the network host for a project""" - host = network_service.get_host_for_project(context.project.id) - if not host: - result = yield rpc.call(FLAGS.network_topic, - {"method": "set_network_host", - "args": {"user_id": context.user.id, - "project_id": context.project.id}}) - host = result['result'] - defer.returnValue('%s.%s' %(FLAGS.network_topic, host)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def run_instances(self, context, **kwargs): - # make sure user can access the image - # vpn image is private so it doesn't show up on lists - if kwargs['image_id'] != FLAGS.vpn_image_id: - image = self._get_image(context, kwargs['image_id']) - - # FIXME(ja): if image is cloudpipe, this breaks - - # get defaults from imagestore - image_id = image['imageId'] - kernel_id = image.get('kernelId', FLAGS.default_kernel) - ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) - - # API parameters overrides of defaults - kernel_id = kwargs.get('kernel_id', kernel_id) - ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) - - # make sure we have access to kernel and ramdisk - self._get_image(context, kernel_id) - self._get_image(context, ramdisk_id) - - logging.debug("Going to run instances...") - reservation_id = utils.generate_uid('r') - launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - key_data = None - if kwargs.has_key('key_name'): - key_pair = context.user.get_key_pair(kwargs['key_name']) - if not key_pair: - raise exception.ApiError('Key Pair %s not found' % - kwargs['key_name']) - key_data = key_pair.public_key - network_topic = yield self._get_network_topic(context) - # TODO: Get the real security group of launch in here - security_group = "default" - for num in range(int(kwargs['max_count'])): - vpn = False - if image_id == FLAGS.vpn_image_id: - vpn = True - allocate_result = yield rpc.call(network_topic, - {"method": "allocate_fixed_ip", - "args": {"user_id": context.user.id, - "project_id": context.project.id, - "security_group": security_group, - "vpn": vpn}}) - allocate_data = allocate_result['result'] - inst = self.instdir.new() - inst['image_id'] = image_id - inst['kernel_id'] = kernel_id - inst['ramdisk_id'] = ramdisk_id - inst['user_data'] = kwargs.get('user_data', '') - inst['instance_type'] = 
kwargs.get('instance_type', 'm1.small') - inst['reservation_id'] = reservation_id - inst['launch_time'] = launch_time - inst['key_data'] = key_data or '' - inst['key_name'] = kwargs.get('key_name', '') - inst['user_id'] = context.user.id - inst['project_id'] = context.project.id - inst['ami_launch_index'] = num - inst['security_group'] = security_group - for (key, value) in allocate_data.iteritems(): - inst[key] = value - - inst.save() - rpc.cast(FLAGS.compute_topic, - {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) - logging.debug("Casting to node for %s's instance with IP of %s" % - (context.user.name, inst['private_dns_name'])) - # TODO: Make Network figure out the network name from ip. - defer.returnValue(self._format_instances(context, reservation_id)) - - @rbac.allow('projectmanager', 'sysadmin') - @defer.inlineCallbacks - def terminate_instances(self, context, instance_id, **kwargs): - logging.debug("Going to start terminating instances") - network_topic = yield self._get_network_topic(context) - for i in instance_id: - logging.debug("Going to try and terminate %s" % i) - try: - instance = self._get_instance(context, i) - except exception.NotFound: - logging.warning("Instance %s was not found during terminate" - % i) - continue - elastic_ip = network_model.get_public_ip_for_instance(i) - if elastic_ip: - logging.debug("Disassociating address %s" % elastic_ip) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "disassociate_elastic_ip", - "args": {"elastic_ip": elastic_ip}}) - - fixed_ip = instance.get('private_dns_name', None) - if fixed_ip: - logging.debug("Deallocating address %s" % fixed_ip) - # NOTE(vish): Right now we don't really care if the ip is - # actually removed. We may need to worry about - # checking this later. Perhaps in the scheduler? - rpc.cast(network_topic, - {"method": "deallocate_fixed_ip", - "args": {"fixed_ip": fixed_ip}}) - - if instance.get('node_name', 'unassigned') != 'unassigned': - # NOTE(joshua?): It's also internal default - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "terminate_instance", - "args": {"instance_id": i}}) - else: - instance.destroy() - defer.returnValue(True) - - @rbac.allow('projectmanager', 'sysadmin') - def reboot_instances(self, context, instance_id, **kwargs): - """instance_id is a list of instance ids""" - for i in instance_id: - instance = self._get_instance(context, i) - rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), - {"method": "reboot_instance", - "args" : {"instance_id": i}}) - return defer.succeed(True) - - @rbac.allow('projectmanager', 'sysadmin') - def delete_volume(self, context, volume_id, **kwargs): - # TODO: return error if not authorized - volume = self._get_volume(context, volume_id) - volume_node = volume['node_name'] - rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), - {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) - return defer.succeed(True) - - @rbac.allow('all') - def describe_images(self, context, image_id=None, **kwargs): - # The objectstore does its own authorization for describe - imageSet = images.list(context, image_id) - return defer.succeed({'imagesSet': imageSet}) - - @rbac.allow('projectmanager', 'sysadmin') - def deregister_image(self, context, image_id, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? 
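describe_images and deregister_image delegate to the nova.endpoint.images helpers, whose implementation is shown further down in this patch; they proxy the calls to the objectstore. Illustrative calls with made-up identifiers:

    images.list(context, None)              # every image the caller may see
    images.list(context, ['ami-abc123'])    # only the listed imageId(s)
    images.deregister(context, 'ami-abc123')
    images.register(context, 'mybucket/image.manifest.xml')  # returns a new 'ami-...' id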
- images.deregister(context, image_id) - return defer.succeed({'imageId': image_id}) - - @rbac.allow('projectmanager', 'sysadmin') - def register_image(self, context, image_location=None, **kwargs): - # FIXME: should the objectstore be doing these authorization checks? - if image_location is None and kwargs.has_key('name'): - image_location = kwargs['name'] - image_id = images.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) - - return defer.succeed({'imageId': image_id}) - - @rbac.allow('all') - def describe_image_attribute(self, context, image_id, attribute, **kwargs): - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - try: - image = images.list(context, image_id)[0] - except IndexError: - raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } - if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) - return defer.succeed(result) - - @rbac.allow('projectmanager', 'sysadmin') - def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): - # TODO(devcamcar): Support users and groups other than 'all'. - if attribute != 'launchPermission': - raise exception.ApiError('attribute not supported: %s' % attribute) - if not 'user_group' in kwargs: - raise exception.ApiError('user or group not specified') - if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': - raise exception.ApiError('only group "all" is supported') - if not operation_type in ['add', 'remove']: - raise exception.ApiError('operation_type must be add or remove') - result = images.modify(context, image_id, operation_type) - return defer.succeed(result) - - def update_state(self, topic, value): - """ accepts status reports from the queue and consolidates them """ - # TODO(jmc): if an instance has disappeared from - # the node, call instance_death - if topic == "instances": - return defer.succeed(True) - aggregate_state = getattr(self, topic) - node_name = value.keys()[0] - items = value[node_name] - - logging.debug("Updating %s state for %s" % (topic, node_name)) - - for item_id in items.keys(): - if (aggregate_state.has_key('pending') and - aggregate_state['pending'].has_key(item_id)): - del aggregate_state['pending'][item_id] - aggregate_state[node_name] = items - - return defer.succeed(True) diff --git a/nova/endpoint/images.py b/nova/endpoint/images.py deleted file mode 100644 index fe7cb5d11..000000000 --- a/nova/endpoint/images.py +++ /dev/null @@ -1,95 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Proxy AMI-related calls from the cloud controller, to the running -objectstore daemon. 
-""" - -import boto.s3.connection -import json -import urllib - -from nova import flags -from nova import utils -from nova.auth import manager - - -FLAGS = flags.FLAGS - -def modify(context, image_id, operation): - conn(context).make_request( - method='POST', - bucket='_images', - query_args=qs({'image_id': image_id, 'operation': operation})) - - return True - - -def register(context, image_location): - """ rpc call to register a new image based from a manifest """ - - image_id = utils.generate_uid('ami') - conn(context).make_request( - method='PUT', - bucket='_images', - query_args=qs({'image_location': image_location, - 'image_id': image_id})) - - return image_id - -def list(context, filter_list=[]): - """ return a list of all images that a user can see - - optionally filtered by a list of image_id """ - - # FIXME: send along the list of only_images to check for - response = conn(context).make_request( - method='GET', - bucket='_images') - - result = json.loads(response.read()) - if not filter_list is None: - return [i for i in result if i['imageId'] in filter_list] - return result - -def deregister(context, image_id): - """ unregister an image """ - conn(context).make_request( - method='DELETE', - bucket='_images', - query_args=qs({'image_id': image_id})) - -def conn(context): - access = manager.AuthManager().get_access_key(context.user, - context.project) - secret = str(context.user.secret) - calling = boto.s3.connection.OrdinaryCallingFormat() - return boto.s3.connection.S3Connection(aws_access_key_id=access, - aws_secret_access_key=secret, - is_secure=False, - calling_format=calling, - port=FLAGS.s3_port, - host=FLAGS.s3_host) - - -def qs(params): - pairs = [] - for key in params.keys(): - pairs.append(key + '=' + urllib.quote(params[key])) - return '&'.join(pairs) diff --git a/nova/endpoint/rackspace.py b/nova/endpoint/rackspace.py deleted file mode 100644 index b4e6cd823..000000000 --- a/nova/endpoint/rackspace.py +++ /dev/null @@ -1,186 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc - -from nova import flags -from nova import rpc -from nova import utils -from nova import wsgi -from nova.auth import manager -from nova.compute import model as compute -from nova.network import model as network - - -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - -class API(wsgi.Middleware): - """Entry point for all requests.""" - - def __init__(self): - super(API, self).__init__(Router(webob.exc.HTTPNotFound())) - - @webob.dec.wsgify - def __call__(self, req): - return self.application - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - environ['nova.context'] = context - return self.application - - -class Router(wsgi.Router): - """Route requests to the next WSGI application.""" - - def _build_map(self): - """Build routing map for authentication and cloud.""" - self.map.resource("server", "servers", controller=CloudServerAPI()) - #self._connect("/v1.0", controller=AuthenticationAPI()) - #cloud = CloudServerAPI() - #self._connect("/servers", controller=cloud.launch_server, - # conditions={"method": ["POST"]}) - #self._connect("/servers/{server_id}", controller=cloud.delete_server, - # conditions={'method': ["DELETE"]}) - #self._connect("/servers", controller=cloud) - - -class AuthenticationAPI(wsgi.Application): - """Handle all authorization requests through WSGI applications.""" - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - # TODO(todd): make a actual session with a unique token - # just pass the auth key back through for now - res = webob.Response() - res.status = '204 No Content' - res.headers.add('X-Server-Management-Url', req.host_url) - res.headers.add('X-Storage-Url', req.host_url) - res.headers.add('X-CDN-Managment-Url', req.host_url) - res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) - return res - - -class CloudServerAPI(wsgi.Application): - """Handle all server requests through WSGI applications.""" - - def __init__(self): - super(CloudServerAPI, self).__init__() - self.instdir = compute.InstanceDirectory() - self.network = network.PublicNetworkController() - - @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - value = {"servers": []} - for inst in self.instdir.all: - value["servers"].append(self.instance_details(inst)) - return json.dumps(value) - - def instance_details(self, inst): # pylint: disable-msg=R0201 - """Build the data structure to represent details for an instance.""" - return { - "id": inst.get("instance_id", None), - "imageId": inst.get("image_id", None), - "flavorId": inst.get("instacne_type", None), - "hostId": inst.get("node_name", None), - "status": inst.get("state", "pending"), - "addresses": { - "public": [network.get_public_ip_for_instance( - inst.get("instance_id", None))], - "private": [inst.get("private_dns_name", None)]}, - - # implemented only by Rackspace, not AWS - "name": inst.get("name", "Not-Specified"), - - # not supported - "progress": "Not-Supported", - "metadata": { - "Server Label": "Not-Supported", - "Image Version": "Not-Supported"}} - - @webob.dec.wsgify - def launch_server(self, req): - """Launch a new instance.""" - data = json.loads(req.body) - 
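launch_server() parses the POST body just above, and build_server_instance() below reads the new server's name, imageId and flavorId out of it, so the expected payload is roughly the following (field values are made up):

    # POST /servers with a JSON body like
    #     {"server": {"name": "web-1",
    #                 "imageId": "ami-abc123",
    #                 "flavorId": "m1.small"}}
    # build_server_instance() copies name/imageId/flavorId onto the new
    # instance record before it is cast to the compute service.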
inst = self.build_server_instance(data, req.environ['nova.context']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - return json.dumps({"server": self.instance_details(inst)}) - - def build_server_instance(self, env, context): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = context['user'].id - inst['project_id'] = context['project'].id - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst - - @webob.dec.wsgify - @wsgi.route_args - def delete_server(self, req, route_args): # pylint: disable-msg=R0201 - """Delete an instance.""" - owner_hostname = None - instance = compute.Instance.lookup(route_args['server_id']) - if instance: - owner_hostname = instance["node_name"] - if not owner_hostname: - return webob.exc.HTTPNotFound("Did not find image, or it was " - "not in a running state.") - rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname) - rpc.cast(rpc_transport, - {"method": "reboot_instance", - "args": {"instance_id": route_args['server_id']}}) - req.status = "202 Accepted" diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py new file mode 100644 index 000000000..a83925cc3 --- /dev/null +++ b/nova/endpoint/rackspace/controllers/base.py @@ -0,0 +1,9 @@ +class BaseController(object): + @classmethod + def render(cls, instance): + if isinstance(instance, list): + return [ cls.entity_name : { cls.render(instance) } + else + return + + diff --git a/nova/endpoint/rackspace/controllers/flavors.py b/nova/endpoint/rackspace/controllers/flavors.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/controllers/images.py b/nova/endpoint/rackspace/controllers/images.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py new file mode 100644 index 000000000..af6c958bb --- /dev/null +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -0,0 +1,72 @@ +from nova import rpc +from nova.compute import model as compute +from nova.endpoint.rackspace import BaseController + +class ServersController(BaseController): + entity_name = 'servers' + + def __init__(self): + raise NotImplemented("You may not create an instance of this class") + + @classmethod + def index(cls): + return [instance_details(inst) for inst in compute.InstanceDirectory().all] + + @classmethod + def show(cls, **kwargs): + instance_id = kwargs['id'] + return compute.InstanceDirectory().get(instance_id) + + @classmethod + def delete(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance + raise ServerNotFound("The requested server was not found") + 
instance.destroy() + return True + + @classmethod + def create(cls, **kwargs): + inst = self.build_server_instance(kwargs['server']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + @classmethod + def update(cls, **kwargs): + instance_id = kwargs['id'] + instance = compute.InstanceDirectory().get(instance_id) + if not instance: + raise ServerNotFound("The requested server was not found") + instance.update(kwargs['server']) + instance.save() + + @classmethod + def build_server_instance(self, env): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = env['user']['id'] + inst['project_id'] = env['project']['id'] + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst diff --git a/nova/endpoint/rackspace/controllers/shared_ip_groups.py b/nova/endpoint/rackspace/controllers/shared_ip_groups.py new file mode 100644 index 000000000..e69de29bb diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/rackspace.py new file mode 100644 index 000000000..75b828e91 --- /dev/null +++ b/nova/endpoint/rackspace/rackspace.py @@ -0,0 +1,183 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +Rackspace API Endpoint +""" + +import json +import time + +import webob.dec +import webob.exc + +from nova import flags +from nova import rpc +from nova import utils +from nova import wsgi +from nova.auth import manager +from nova.compute import model as compute +from nova.network import model as network + + +FLAGS = flags.FLAGS +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + + +class API(wsgi.Middleware): + """Entry point for all requests.""" + + def __init__(self): + super(API, self).__init__(Router(webob.exc.HTTPNotFound())) + + def __call__(self, environ, start_response): + context = {} + if "HTTP_X_AUTH_TOKEN" in environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden()(environ, start_response) + environ['nova.context'] = context + return self.application(environ, start_response) + + +class Router(wsgi.Router): + """Route requests to the next WSGI application.""" + + def _build_map(self): + """Build routing map for authentication and cloud.""" + self._connect("/v1.0", controller=AuthenticationAPI()) + cloud = CloudServerAPI() + self._connect("/servers", controller=cloud.launch_server, + conditions={"method": ["POST"]}) + self._connect("/servers/{server_id}", controller=cloud.delete_server, + conditions={'method': ["DELETE"]}) + self._connect("/servers", controller=cloud) + + +class AuthenticationAPI(wsgi.Application): + """Handle all authorization requests through WSGI applications.""" + + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + # TODO(todd): make a actual session with a unique token + # just pass the auth key back through for now + res = webob.Response() + res.status = '204 No Content' + res.headers.add('X-Server-Management-Url', req.host_url) + res.headers.add('X-Storage-Url', req.host_url) + res.headers.add('X-CDN-Managment-Url', req.host_url) + res.headers.add('X-Auth-Token', req.headers['X-Auth-Key']) + return res + + +class CloudServerAPI(wsgi.Application): + """Handle all server requests through WSGI applications.""" + + def __init__(self): + super(CloudServerAPI, self).__init__() + self.instdir = compute.InstanceDirectory() + self.network = network.PublicNetworkController() + + @webob.dec.wsgify + def __call__(self, req): # pylint: disable-msg=W0221 + value = {"servers": []} + for inst in self.instdir.all: + value["servers"].append(self.instance_details(inst)) + return json.dumps(value) + + def instance_details(self, inst): # pylint: disable-msg=R0201 + """Build the data structure to represent details for an instance.""" + return { + "id": inst.get("instance_id", None), + "imageId": inst.get("image_id", None), + "flavorId": inst.get("instacne_type", None), + "hostId": inst.get("node_name", None), + "status": inst.get("state", "pending"), + "addresses": { + "public": [network.get_public_ip_for_instance( + inst.get("instance_id", None))], + "private": [inst.get("private_dns_name", None)]}, + + # implemented only by Rackspace, not AWS + "name": inst.get("name", "Not-Specified"), + + # not supported + "progress": "Not-Supported", + "metadata": { + "Server Label": "Not-Supported", + "Image Version": "Not-Supported"}} + + @webob.dec.wsgify + def launch_server(self, req): + """Launch a new instance.""" + data = json.loads(req.body) + inst = self.build_server_instance(data, 
req.environ['nova.context']) + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst.instance_id}}) + + return json.dumps({"server": self.instance_details(inst)}) + + def build_server_instance(self, env, context): + """Build instance data structure and save it to the data store.""" + reservation = utils.generate_uid('r') + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = self.instdir.new() + inst['name'] = env['server']['name'] + inst['image_id'] = env['server']['imageId'] + inst['instance_type'] = env['server']['flavorId'] + inst['user_id'] = context['user'].id + inst['project_id'] = context['project'].id + inst['reservation_id'] = reservation + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + address = self.network.allocate_ip( + inst['user_id'], + inst['project_id'], + mac=inst['mac_address']) + inst['private_dns_name'] = str(address) + inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( + inst['user_id'], + inst['project_id'], + 'default')['bridge_name'] + # key_data, key_name, ami_launch_index + # TODO(todd): key data or root password + inst.save() + return inst + + @webob.dec.wsgify + @wsgi.route_args + def delete_server(self, req, route_args): # pylint: disable-msg=R0201 + """Delete an instance.""" + owner_hostname = None + instance = compute.Instance.lookup(route_args['server_id']) + if instance: + owner_hostname = instance["node_name"] + if not owner_hostname: + return webob.exc.HTTPNotFound("Did not find image, or it was " + "not in a running state.") + rpc_transport = "%s:%s" % (FLAGS.compute_topic, owner_hostname) + rpc.cast(rpc_transport, + {"method": "reboot_instance", + "args": {"instance_id": route_args['server_id']}}) + req.status = "202 Accepted" -- cgit From 11c47dd12adcbf2a5011510f01081db858b057db Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:46 -0400 Subject: Mergeprop cleanup --- nova/endpoint/rackspace/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 nova/endpoint/rackspace/__init__.py diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py deleted file mode 100644 index e69de29bb..000000000 -- cgit From 39d12bf518e284183d1debd52fe7081ecf1c633d Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:36:56 -0400 Subject: Mergeprop cleanup --- nova/endpoint/rackspace/__init__.py | 90 ++++++++++++++++++++++++++++++++++++ nova/endpoint/rackspace/rackspace.py | 90 ------------------------------------ 2 files changed, 90 insertions(+), 90 deletions(-) create mode 100644 nova/endpoint/rackspace/__init__.py delete mode 100644 nova/endpoint/rackspace/rackspace.py diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py new file mode 100644 index 000000000..f14f6218c --- /dev/null +++ b/nova/endpoint/rackspace/__init__.py @@ -0,0 +1,90 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Rackspace API Endpoint +""" + +import json +import time + +import webob.dec +import webob.exc +import routes + +from nova import flags +from nova import wsgi +from nova.auth import manager +from nova.endpoint.rackspace import controllers + + +FLAGS = flags.FLAGS +flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + + +class Api(wsgi.Middleware): + """WSGI entry point for all Rackspace API requests.""" + + def __init__(self): + app = AuthMiddleware(ApiRouter()) + super(Api, self).__init__(app) + + +class AuthMiddleware(wsgi.Middleware): + """Authorize the rackspace API request or return an HTTP Forbidden.""" + + #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced + #with correct RS API auth? + + @webob.dec.wsgify + def __call__(self, req): + context = {} + if "HTTP_X_AUTH_TOKEN" in req.environ: + context['user'] = manager.AuthManager().get_user_from_access_key( + req.environ['HTTP_X_AUTH_TOKEN']) + if context['user']: + context['project'] = manager.AuthManager().get_project( + context['user'].name) + if "user" not in context: + return webob.exc.HTTPForbidden() + req.environ['nova.context'] = context + return self.application + + +class ApiRouter(wsgi.Router): + """ + Routes requests on the Rackspace API to the appropriate controller + and method. + """ + + def __init__(self): + mapper = routes.Mapper() + + mapper.resource("server", "servers") + mapper.resource("image", "images") + mapper.resource("flavor", "flavors") + mapper.resource("sharedipgroup", "sharedipgroups") + + targets = { + 'servers': controllers.ServersController(), + 'images': controllers.ImagesController(), + 'flavors': controllers.FlavorsController(), + 'sharedipgroups': controllers.SharedIpGroupsController() + } + + super(ApiRouter, self).__init__(mapper, targets) diff --git a/nova/endpoint/rackspace/rackspace.py b/nova/endpoint/rackspace/rackspace.py deleted file mode 100644 index f14f6218c..000000000 --- a/nova/endpoint/rackspace/rackspace.py +++ /dev/null @@ -1,90 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
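mapper.resource(), as used in APIRouter above, expands each resource into the standard RESTful route set defined by the Routes library, and the targets dict maps the names used as keys ('servers', 'images', 'flavors', 'sharedipgroups') to the WSGI controllers that handle them. Roughly, for the server resource (the other three follow the same pattern):

    # GET    /servers        -> ServersController.index
    # POST   /servers        -> ServersController.create
    # GET    /servers/{id}   -> ServersController.show
    # PUT    /servers/{id}   -> ServersController.update
    # DELETE /servers/{id}   -> ServersController.delete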
- -""" -Rackspace API Endpoint -""" - -import json -import time - -import webob.dec -import webob.exc -import routes - -from nova import flags -from nova import wsgi -from nova.auth import manager -from nova.endpoint.rackspace import controllers - - -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - -class Api(wsgi.Middleware): - """WSGI entry point for all Rackspace API requests.""" - - def __init__(self): - app = AuthMiddleware(ApiRouter()) - super(Api, self).__init__(app) - - -class AuthMiddleware(wsgi.Middleware): - """Authorize the rackspace API request or return an HTTP Forbidden.""" - - #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced - #with correct RS API auth? - - @webob.dec.wsgify - def __call__(self, req): - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - req.environ['nova.context'] = context - return self.application - - -class ApiRouter(wsgi.Router): - """ - Routes requests on the Rackspace API to the appropriate controller - and method. - """ - - def __init__(self): - mapper = routes.Mapper() - - mapper.resource("server", "servers") - mapper.resource("image", "images") - mapper.resource("flavor", "flavors") - mapper.resource("sharedipgroup", "sharedipgroups") - - targets = { - 'servers': controllers.ServersController(), - 'images': controllers.ImagesController(), - 'flavors': controllers.FlavorsController(), - 'sharedipgroups': controllers.SharedIpGroupsController() - } - - super(ApiRouter, self).__init__(mapper, targets) -- cgit From 4391b7362eeab2cd976309696be1209ac771ce24 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Thu, 12 Aug 2010 18:41:31 -0400 Subject: Undo the changes to cloud.py that somehow diverged from trunk --- nova/endpoint/cloud.py | 105 ++++++++++++++++++++++++------------------------- 1 file changed, 52 insertions(+), 53 deletions(-) diff --git a/nova/endpoint/cloud.py b/nova/endpoint/cloud.py index 878d54a15..ad9188ff3 100644 --- a/nova/endpoint/cloud.py +++ b/nova/endpoint/cloud.py @@ -47,6 +47,7 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') + def _gen_key(user_id, key_name): """ Tuck this into AuthManager """ try: @@ -102,15 +103,16 @@ class CloudController(object): result = {} for instance in self.instdir.all: if instance['project_id'] == project_id: - line = '%s slots=%d' % (instance['private_dns_name'], INSTANCE_TYPES[instance['instance_type']]['vcpus']) + line = '%s slots=%d' % (instance['private_dns_name'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) if instance['key_name'] in result: result[instance['key_name']].append(line) else: result[instance['key_name']] = [line] return result - def get_metadata(self, ip): - i = self.get_instance_by_ip(ip) + def get_metadata(self, ipaddress): + i = self.get_instance_by_ip(ipaddress) if i is None: return None mpi = self._get_mpi_data(i['project_id']) @@ -147,7 +149,7 @@ class CloudController(object): }, 'public-hostname': i.get('dns_name', ''), 'public-ipv4': i.get('dns_name', ''), # TODO: switch to IP - 'public-keys' : keys, + 'public-keys': keys, 'ramdisk-id': i.get('ramdisk_id', ''), 'reservation-id': i['reservation_id'], 'security-groups': i.get('groups', ''), @@ -203,26 +205,22 @@ class 
CloudController(object): 'keyFingerprint': key_pair.fingerprint, }) - return { 'keypairsSet': result } + return {'keypairsSet': result} @rbac.allow('all') def create_key_pair(self, context, key_name, **kwargs): - try: - d = defer.Deferred() - p = context.handler.application.settings.get('pool') - def _complete(kwargs): - if 'exception' in kwargs: - d.errback(kwargs['exception']) - return - d.callback({'keyName': key_name, - 'keyFingerprint': kwargs['fingerprint'], - 'keyMaterial': kwargs['private_key']}) - p.apply_async(_gen_key, [context.user.id, key_name], - callback=_complete) - return d - - except manager.UserError as e: - raise + dcall = defer.Deferred() + pool = context.handler.application.settings.get('pool') + def _complete(kwargs): + if 'exception' in kwargs: + dcall.errback(kwargs['exception']) + return + dcall.callback({'keyName': key_name, + 'keyFingerprint': kwargs['fingerprint'], + 'keyMaterial': kwargs['private_key']}) + pool.apply_async(_gen_key, [context.user.id, key_name], + callback=_complete) + return dcall @rbac.allow('all') def delete_key_pair(self, context, key_name, **kwargs): @@ -232,7 +230,7 @@ class CloudController(object): @rbac.allow('all') def describe_security_groups(self, context, group_names, **kwargs): - groups = { 'securityGroupSet': [] } + groups = {'securityGroupSet': []} # Stubbed for now to unblock other things. return groups @@ -251,7 +249,7 @@ class CloudController(object): instance = self._get_instance(context, instance_id[0]) return rpc.call('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "get_console_output", - "args" : {"instance_id": instance_id[0]}}) + "args": {"instance_id": instance_id[0]}}) def _get_user_id(self, context): if context and context.user: @@ -285,10 +283,10 @@ class CloudController(object): if volume['attach_status'] == 'attached': v['attachmentSet'] = [{'attachTime': volume['attach_time'], 'deleteOnTermination': volume['delete_on_termination'], - 'device' : volume['mountpoint'], - 'instanceId' : volume['instance_id'], - 'status' : 'attached', - 'volume_id' : volume['volume_id']}] + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['volume_id']}] else: v['attachmentSet'] = [{}] return v @@ -298,7 +296,7 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): # TODO(vish): refactor this to create the volume object here and tell service to create it result = yield rpc.call(FLAGS.volume_topic, {"method": "create_volume", - "args" : {"size": size, + "args": {"size": size, "user_id": context.user.id, "project_id": context.project.id}}) # NOTE(vish): rpc returned value is in the result key in the dictionary @@ -348,15 +346,15 @@ class CloudController(object): compute_node = instance['node_name'] rpc.cast('%s.%s' % (FLAGS.compute_topic, compute_node), {"method": "attach_volume", - "args" : {"volume_id": volume_id, - "instance_id" : instance_id, - "mountpoint" : device}}) - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + "args": {"volume_id": volume_id, + "instance_id": instance_id, + "mountpoint": device}}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) 
@rbac.allow('projectmanager', 'sysadmin') @@ -372,18 +370,18 @@ class CloudController(object): instance = self._get_instance(context, instance_id) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "detach_volume", - "args" : {"instance_id": instance_id, + "args": {"instance_id": instance_id, "volume_id": volume_id}}) except exception.NotFound: # If the instance doesn't exist anymore, # then we need to call detach blind volume.finish_detach() - return defer.succeed({'attachTime' : volume['attach_time'], - 'device' : volume['mountpoint'], - 'instanceId' : instance_id, - 'requestId' : context.request_id, - 'status' : volume['attach_status'], - 'volumeId' : volume_id}) + return defer.succeed({'attachTime': volume['attach_time'], + 'device': volume['mountpoint'], + 'instanceId': instance_id, + 'requestId': context.request_id, + 'status': volume['attach_status'], + 'volumeId': volume_id}) def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -425,7 +423,8 @@ class CloudController(object): i['key_name'] = instance.get('key_name', None) if context.user.is_admin(): i['key_name'] = '%s (%s, %s)' % (i['key_name'], - instance.get('project_id', None), instance.get('node_name','')) + instance.get('project_id', None), + instance.get('node_name', '')) i['product_codes_set'] = self._convert_to_set( instance.get('product_codes', None), 'product_code') i['instance_type'] = instance.get('instance_type', None) @@ -442,7 +441,7 @@ class CloudController(object): reservations[res_id] = r reservations[res_id]['instances_set'].append(i) - instance_response = {'reservationSet' : list(reservations.values()) } + instance_response = {'reservationSet': list(reservations.values())} return instance_response @rbac.allow('all') @@ -457,7 +456,7 @@ class CloudController(object): address['project_id'] == context.project.id): address_rv = { 'public_ip': address['address'], - 'instance_id' : address.get('instance_id', 'free') + 'instance_id': address.get('instance_id', 'free') } if context.user.is_admin(): address_rv['instance_id'] = "%s (%s, %s)" % ( @@ -477,7 +476,7 @@ class CloudController(object): "args": {"user_id": context.user.id, "project_id": context.project.id}}) public_ip = alloc_result['result'] - defer.returnValue({'addressSet': [{'publicIp' : public_ip}]}) + defer.returnValue({'addressSet': [{'publicIp': public_ip}]}) @rbac.allow('netadmin') @defer.inlineCallbacks @@ -591,7 +590,7 @@ class CloudController(object): inst.save() rpc.cast(FLAGS.compute_topic, {"method": "run_instance", - "args": {"instance_id" : inst.instance_id}}) + "args": {"instance_id": inst.instance_id}}) logging.debug("Casting to node for %s's instance with IP of %s" % (context.user.name, inst['private_dns_name'])) # TODO: Make Network figure out the network name from ip. 
@@ -646,7 +645,7 @@ class CloudController(object): instance = self._get_instance(context, i) rpc.cast('%s.%s' % (FLAGS.compute_topic, instance['node_name']), {"method": "reboot_instance", - "args" : {"instance_id": i}}) + "args": {"instance_id": i}}) return defer.succeed(True) @rbac.allow('projectmanager', 'sysadmin') @@ -656,7 +655,7 @@ class CloudController(object): volume_node = volume['node_name'] rpc.cast('%s.%s' % (FLAGS.volume_topic, volume_node), {"method": "delete_volume", - "args" : {"volume_id": volume_id}}) + "args": {"volume_id": volume_id}}) return defer.succeed(True) @rbac.allow('all') @@ -689,9 +688,9 @@ class CloudController(object): image = images.list(context, image_id)[0] except IndexError: raise exception.ApiError('invalid id: %s' % image_id) - result = { 'image_id': image_id, 'launchPermission': [] } + result = {'image_id': image_id, 'launchPermission': []} if image['isPublic']: - result['launchPermission'].append({ 'group': 'all' }) + result['launchPermission'].append({'group': 'all'}) return defer.succeed(result) @rbac.allow('projectmanager', 'sysadmin') -- cgit From bfb906cb0235a6e0b037d387aadc4abc2280fea0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Fri, 13 Aug 2010 11:09:27 -0400 Subject: Support JSON and XML in Serializer --- nova/wsgi.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 10 deletions(-) diff --git a/nova/wsgi.py b/nova/wsgi.py index 304f7149a..0570e1829 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -218,23 +218,59 @@ class Serializer(object): Serializes a dictionary to a Content Type specified by a WSGI environment. """ - def __init__(self, environ): - """Create a serializer based on the given WSGI environment.""" + def __init__(self, environ, metadata=None): + """ + Create a serializer based on the given WSGI environment. + 'metadata' is an optional dict mapping MIME types to information + needed to serialize a dictionary to that type. + """ self.environ = environ + self.metadata = metadata or {} - def serialize(self, data): + def to_content_type(self, data): """ Serialize a dictionary into a string. The format of the string will be decided based on the Content Type requested in self.environ: by Accept: header, or by URL suffix. """ - req = webob.Request(self.environ) - # TODO(gundlach): do XML correctly and be more robust - if req.accept and 'application/json' in req.accept: + mimetype = 'application/xml' + # TODO(gundlach): determine mimetype from request + + if mimetype == 'application/json': import json return json.dumps(data) + elif mimetype == 'application/xml': + metadata = self.metadata.get('application/xml', {}) + # We expect data to contain a single key which is the XML root. 
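To make the new XML branch concrete, a hedged example of the intended mapping (input values made up; the node-building helper follows just below):

    # {'server': {'id': 1, 'name': 'web-1'}}   ->   <server>
    #                                                 <id>1</id>
    #                                                 <name>web-1</name>
    #                                               </server>
    #
    # while the JSON branch simply yields
    #     {"server": {"id": 1, "name": "web-1"}}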
+ root_key = data.keys()[0] + from xml.dom import minidom + doc = minidom.Document() + node = self._to_xml_node(doc, metadata, root_key, data[root_key]) + return node.toprettyxml(indent=' ') else: - return '' + repr(data) + \ - '' - - + return repr(data) + + def _to_xml_node(self, doc, metadata, nodename, data): + result = doc.createElement(nodename) + if type(data) is list: + singular = metadata.get('plurals', {}).get(nodename, None) + if singular is None: + if nodename.endswith('s'): + singular = nodename[:-1] + else: + singular = 'item' + for item in data: + node = self._to_xml_node(doc, metadata, singular, item) + result.appendChild(node) + elif type(data) is dict: + attrs = metadata.get('attributes', {}).get(nodename, {}) + for k,v in data.items(): + if k in attrs: + result.setAttribute(k, str(v)) + else: + node = self._to_xml_node(doc, metadata, k, v) + result.appendChild(node) + else: # atom + node = doc.createTextNode(str(data)) + result.appendChild(node) + return result -- cgit From 5c4a806c852a1c7180bc1c7e2ea8f065198e36d2 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 10:57:42 -0400 Subject: PEP8 and name corrections --- bin/nova-rsapi | 2 +- nova/endpoint/aws/__init__.py | 4 ++-- nova/endpoint/rackspace/__init__.py | 10 +++++----- nova/endpoint/rackspace/controllers/base.py | 4 ++-- nova/wsgi.py | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bin/nova-rsapi b/bin/nova-rsapi index 3fc61860e..a35936eff 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -32,4 +32,4 @@ flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.ApiVersionRouter(), FLAGS.cc_port) + wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index f49270a30..4507cae62 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -4,7 +4,7 @@ import webob.dec from nova import wsgi # TODO(gundlach): temp -class Api(wsgi.Router): +class API(wsgi.Router): """WSGI entry point for all AWS API requests.""" def __init__(self): @@ -14,7 +14,7 @@ class Api(wsgi.Router): targets = {"dummy": self.dummy } - super(Api, self).__init__(mapper, targets) + super(API, self).__init__(mapper, targets) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index f14f6218c..162b35caa 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -37,12 +37,12 @@ FLAGS = flags.FLAGS flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') -class Api(wsgi.Middleware): +class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" def __init__(self): - app = AuthMiddleware(ApiRouter()) - super(Api, self).__init__(app) + app = AuthMiddleware(APIRouter()) + super(API, self).__init__(app) class AuthMiddleware(wsgi.Middleware): @@ -66,7 +66,7 @@ class AuthMiddleware(wsgi.Middleware): return self.application -class ApiRouter(wsgi.Router): +class APIRouter(wsgi.Router): """ Routes requests on the Rackspace API to the appropriate controller and method. 
@@ -87,4 +87,4 @@ class ApiRouter(wsgi.Router): 'sharedipgroups': controllers.SharedIpGroupsController() } - super(ApiRouter, self).__init__(mapper, targets) + super(APIRouter, self).__init__(mapper, targets) diff --git a/nova/endpoint/rackspace/controllers/base.py b/nova/endpoint/rackspace/controllers/base.py index 3ada53fd4..8cd44f62e 100644 --- a/nova/endpoint/rackspace/controllers/base.py +++ b/nova/endpoint/rackspace/controllers/base.py @@ -1,6 +1,6 @@ -from nova.wsgi import WSGIController +from nova import wsgi -class BaseController(WSGIController): +class BaseController(wsgi.Controller): @classmethod def render(cls, instance): if isinstance(instance, list): diff --git a/nova/wsgi.py b/nova/wsgi.py index 0570e1829..52e155101 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -146,7 +146,7 @@ class Router(object): Each route in `mapper` must specify a 'controller' string, which is a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a WSGIController, you'll want to specify + run. If routing to a wsgi.Controller, you'll want to specify 'action' as well so the controller knows what method to call on itself. @@ -195,7 +195,7 @@ class Router(object): return app -class WSGIController(object): +class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method on itself. -- cgit From f78a8936b1a401f07fc0a09d4bd150d2793e436e Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Mon, 16 Aug 2010 13:22:41 -0400 Subject: All controller actions receive a 'req' parameter containing the webob Request. --- nova/endpoint/__init__.py | 10 +++--- nova/endpoint/aws/__init__.py | 6 ++-- nova/endpoint/rackspace/__init__.py | 23 ++++++------ nova/endpoint/rackspace/controllers/servers.py | 2 +- nova/wsgi.py | 48 ++++++++++++-------------- 5 files changed, 41 insertions(+), 48 deletions(-) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 065f45848..9aae933af 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -36,16 +36,16 @@ import routes from nova.endpoint import rackspace from nova.endpoint import aws -class ApiVersionRouter(wsgi.Router): +class APIVersionRouter(wsgi.Router): """Routes top-level requests to the appropriate API.""" def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "/v1.0/{path_info:.*}", controller="rs") - mapper.connect(None, "/ec2/{path_info:.*}", controller="ec2") + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - targets = {"rs": rackspace.Api(), "ec2": aws.Api()} + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - super(ApiVersionRouter, self).__init__(mapper, targets) + super(APIVersionRouter, self).__init__(mapper) diff --git a/nova/endpoint/aws/__init__.py b/nova/endpoint/aws/__init__.py index 4507cae62..55cbb8fd3 100644 --- a/nova/endpoint/aws/__init__.py +++ b/nova/endpoint/aws/__init__.py @@ -10,11 +10,9 @@ class API(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.connect(None, "{all:.*}", controller="dummy") + mapper.connect(None, "{all:.*}", controller=self.dummy) - targets = {"dummy": self.dummy } - - super(API, self).__init__(mapper, targets) + super(API, self).__init__(mapper) @webob.dec.wsgify def dummy(self, req): diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index 162b35caa..78b9c9429 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -75,16 
+75,13 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - mapper.resource("server", "servers") - mapper.resource("image", "images") - mapper.resource("flavor", "flavors") - mapper.resource("sharedipgroup", "sharedipgroups") - - targets = { - 'servers': controllers.ServersController(), - 'images': controllers.ImagesController(), - 'flavors': controllers.FlavorsController(), - 'sharedipgroups': controllers.SharedIpGroupsController() - } - - super(APIRouter, self).__init__(mapper, targets) + mapper.resource("server", "servers", + controller=controllers.ServersController()) + mapper.resource("image", "images", + controller=controllers.ImagesController()) + mapper.resource("flavor", "flavors", + controller=controllers.FlavorsController()) + mapper.resource("sharedipgroup", "sharedipgroups", + controller=controllers.SharedIpGroupsController()) + + super(APIRouter, self).__init__(mapper) diff --git a/nova/endpoint/rackspace/controllers/servers.py b/nova/endpoint/rackspace/controllers/servers.py index db02e058d..2f8e662d6 100644 --- a/nova/endpoint/rackspace/controllers/servers.py +++ b/nova/endpoint/rackspace/controllers/servers.py @@ -5,7 +5,7 @@ from nova.endpoint.rackspace.controllers.base import BaseController class ServersController(BaseController): entity_name = 'servers' - def index(cls): + def index(self, **kwargs): return [instance_details(inst) for inst in compute.InstanceDirectory().all] def show(self, **kwargs): diff --git a/nova/wsgi.py b/nova/wsgi.py index 52e155101..a0a175dc7 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -140,34 +140,31 @@ class Router(object): WSGI middleware that maps incoming requests to WSGI apps. """ - def __init__(self, mapper, targets): + def __init__(self, mapper): """ Create a router for the given routes.Mapper. - Each route in `mapper` must specify a 'controller' string, which is - a key into the 'targets' dictionary whose value is a WSGI app to - run. If routing to a wsgi.Controller, you'll want to specify - 'action' as well so the controller knows what method to call on - itself. + Each route in `mapper` must specify a 'controller', which is a + WSGI app to call. You'll probably want to specify an 'action' as + well and have your controller be a wsgi.Controller, who will route + the request to the action method. Examples: mapper = routes.Mapper() - targets = { "servers": ServerController(), "blog": BlogWsgiApp() } + sc = ServerController() # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller="servers", action="list") + mapper.connect(None, "/svrlist", controller=sc, action="list") - # Controller string is implicitly equal to 2nd param here, and - # actions are all implicitly defined - mapper.resource("server", "servers") + # Actions are all implicitly defined + mapper.resource("server", "servers", controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller="blog") + mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) """ self.map = mapper - self.targets = targets self._router = routes.middleware.RoutesMiddleware(self._dispatch, self.map) @@ -186,31 +183,32 @@ class Router(object): and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. 
""" - if req.environ['routes.route'] is None: - return webob.exc.HTTPNotFound() match = req.environ['wsgiorg.routing_args'][1] - app_name = match['controller'] - - app = self.targets[app_name] + if not match: + return webob.exc.HTTPNotFound() + app = match['controller'] return app class Controller(object): """ WSGI app that reads routing information supplied by RoutesMiddleware - and calls the requested action method on itself. + and calls the requested action method upon itself. All action methods + must, in addition to their normal parameters, accept a 'req' argument + which is the incoming webob.Request. """ @webob.dec.wsgify def __call__(self, req): """ - Call the method on self specified in req.environ by RoutesMiddleware. + Call the method specified in req.environ by RoutesMiddleware. """ - routes_dict = req.environ['wsgiorg.routing_args'][1] - action = routes_dict['action'] + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] method = getattr(self, action) - del routes_dict['controller'] - del routes_dict['action'] - return method(**routes_dict) + del arg_dict['controller'] + del arg_dict['action'] + arg_dict['req'] = req + return method(**arg_dict) class Serializer(object): -- cgit From f92851ba8ffcb530f6f3c4ea354dd89d29146f6c Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:03:38 -0400 Subject: Remove duplicate definition of flag --- nova/endpoint/rackspace/__init__.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/nova/endpoint/rackspace/__init__.py b/nova/endpoint/rackspace/__init__.py index 78b9c9429..ac53ee10b 100644 --- a/nova/endpoint/rackspace/__init__.py +++ b/nova/endpoint/rackspace/__init__.py @@ -33,10 +33,6 @@ from nova.auth import manager from nova.endpoint.rackspace import controllers -FLAGS = flags.FLAGS -flags.DEFINE_string('cloud_topic', 'cloud', 'the topic clouds listen on') - - class API(wsgi.Middleware): """WSGI entry point for all Rackspace API requests.""" -- cgit From e8be36d7a7be2ebbf5493766ce909d7913bf61e0 Mon Sep 17 00:00:00 2001 From: Michael Gundlach Date: Tue, 17 Aug 2010 13:23:20 -0400 Subject: Move eventlet-using class out of endpoint/__init__.py into its own submodule, so that twisted-related code using endpoint.[other stuff] wouldn't run eventlet and make unit tests throw crazy errors about eventlet 0.9.10 not playing nicely with twisted. --- bin/nova-rsapi | 5 ++--- nova/endpoint/__init__.py | 51 ----------------------------------------------- nova/endpoint/newapi.py | 51 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 54 deletions(-) create mode 100644 nova/endpoint/newapi.py diff --git a/bin/nova-rsapi b/bin/nova-rsapi index a35936eff..e2722422e 100755 --- a/bin/nova-rsapi +++ b/bin/nova-rsapi @@ -21,15 +21,14 @@ Daemon for the Rackspace API endpoint. 
""" -import nova.endpoint - from nova import flags from nova import utils from nova import wsgi +from nova.endpoint import newapi FLAGS = flags.FLAGS flags.DEFINE_integer('cc_port', 8773, 'cloud controller port') if __name__ == '__main__': utils.default_flagfile() - wsgi.run_server(nova.endpoint.APIVersionRouter(), FLAGS.cc_port) + wsgi.run_server(newapi.APIVersionRouter(), FLAGS.cc_port) diff --git a/nova/endpoint/__init__.py b/nova/endpoint/__init__.py index 9aae933af..e69de29bb 100644 --- a/nova/endpoint/__init__.py +++ b/nova/endpoint/__init__.py @@ -1,51 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:mod:`nova.endpoint` -- Main NOVA Api endpoints -===================================================== - -.. automodule:: nova.endpoint - :platform: Unix - :synopsis: REST APIs for all nova functions -.. moduleauthor:: Jesse Andrews -.. moduleauthor:: Devin Carlen -.. moduleauthor:: Vishvananda Ishaya -.. moduleauthor:: Joshua McKenty -.. moduleauthor:: Manish Singh -.. moduleauthor:: Andy Smith -""" - -from nova import wsgi -import routes -from nova.endpoint import rackspace -from nova.endpoint import aws - -class APIVersionRouter(wsgi.Router): - """Routes top-level requests to the appropriate API.""" - - def __init__(self): - mapper = routes.Mapper() - - rsapi = rackspace.API() - mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) - - mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) - - super(APIVersionRouter, self).__init__(mapper) - diff --git a/nova/endpoint/newapi.py b/nova/endpoint/newapi.py new file mode 100644 index 000000000..9aae933af --- /dev/null +++ b/nova/endpoint/newapi.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`nova.endpoint` -- Main NOVA Api endpoints +===================================================== + +.. automodule:: nova.endpoint + :platform: Unix + :synopsis: REST APIs for all nova functions +.. moduleauthor:: Jesse Andrews +.. moduleauthor:: Devin Carlen +.. moduleauthor:: Vishvananda Ishaya +.. moduleauthor:: Joshua McKenty +.. moduleauthor:: Manish Singh +.. 
moduleauthor:: Andy Smith +""" + +from nova import wsgi +import routes +from nova.endpoint import rackspace +from nova.endpoint import aws + +class APIVersionRouter(wsgi.Router): + """Routes top-level requests to the appropriate API.""" + + def __init__(self): + mapper = routes.Mapper() + + rsapi = rackspace.API() + mapper.connect(None, "/v1.0/{path_info:.*}", controller=rsapi) + + mapper.connect(None, "/ec2/{path_info:.*}", controller=aws.API()) + + super(APIVersionRouter, self).__init__(mapper) + -- cgit
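
With the version router now living in nova/endpoint/newapi.py, the stack can be poked at in-process without binding a port. A rough sketch, assuming the tree as of the last commit above and an environment where the nova modules import cleanly (the paths and printed statuses are illustrative -- nothing here is asserted):

    # Python 2 sketch: drive APIVersionRouter directly through webob rather
    # than starting a server with wsgi.run_server() as bin/nova-rsapi does.
    import webob

    from nova.endpoint import newapi

    app = newapi.APIVersionRouter()

    # "/v1.0/..." matches the {path_info:.*} route and is handed to the
    # Rackspace stack (auth middleware first); "/ec2/..." hits the AWS
    # placeholder controller.
    for path in ('/v1.0/servers', '/ec2/anything'):
        print path, webob.Request.blank(path).get_response(app).status

Because every layer is plain WSGI, the same APIVersionRouter instance can be handed to wsgi.run_server() in bin/nova-rsapi or driven like this from a unit test.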