| author | Justin Santa Barbara <justinsb@justinsb-desktop> | 2010-10-14 12:59:36 -0700 |
|---|---|---|
| committer | Justin Santa Barbara <justinsb@justinsb-desktop> | 2010-10-14 12:59:36 -0700 |
| commit | d8643f1e15f241db96893d1ea41083a2bee65dbd (patch) | |
| tree | 12e9e85733306f97b12b99339edbe49ef4031418 /nova/api | |
| parent | 759bab6059ef2e4c463a73e12fe85fe4b147eba7 (diff) | |
| parent | 3363b133a927509432cb42d77abf18d3d5248abf (diff) | |
| download | nova-d8643f1e15f241db96893d1ea41083a2bee65dbd.tar.gz nova-d8643f1e15f241db96893d1ea41083a2bee65dbd.tar.xz nova-d8643f1e15f241db96893d1ea41083a2bee65dbd.zip | |
Merged with trunk, fixed broken stuff
Diffstat (limited to 'nova/api')
24 files changed, 2922 insertions(+), 186 deletions(-)
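Among the additions, nova/api/ec2/cloud.py introduces two helpers that map EC2-style instance identifiers (`i-` followed by a base-36 number) to Nova's integer internal IDs. The sketch below lifts those two functions out of the diff so the round trip can be tried standalone; the sample values in the asserts are illustrative only and are not part of the commit.

```python
def ec2_id_to_internal_id(ec2_id):
    """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)."""
    return int(ec2_id[2:], 36)


def internal_id_to_ec2_id(internal_id):
    """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])."""
    digits = []
    while internal_id != 0:
        internal_id, remainder = divmod(internal_id, 36)
        digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder])
    return "i-%s" % ''.join(reversed(digits))


# Round trip on a couple of arbitrary (hypothetical) values:
assert internal_id_to_ec2_id(10) == 'i-a'
assert internal_id_to_ec2_id(123456) == 'i-2n9c'
assert ec2_id_to_internal_id('i-2n9c') == 123456
```

The translation is purely arithmetic, so no lookup table is needed to move between the public EC2 identifier and the internal integer form.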
diff --git a/nova/api/__init__.py b/nova/api/__init__.py index a6bb93348..8ec7094d7 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -21,18 +21,91 @@ Root WSGI middleware for all API controllers. """ import routes +import webob.dec +from nova import flags from nova import wsgi +from nova.api import cloudpipe from nova.api import ec2 -from nova.api import rackspace +from nova.api import openstack +from nova.api.ec2 import metadatarequesthandler + + +flags.DEFINE_string('osapi_subdomain', 'api', + 'subdomain running the OpenStack API') +flags.DEFINE_string('ec2api_subdomain', 'ec2', + 'subdomain running the EC2 API') +flags.DEFINE_string('FAKE_subdomain', None, + 'set to api or ec2 to fake the subdomain of the host for testing') +FLAGS = flags.FLAGS class API(wsgi.Router): """Routes top-level requests to the appropriate controller.""" def __init__(self): + osapidomain = {'sub_domain': [FLAGS.osapi_subdomain]} + ec2domain = {'sub_domain': [FLAGS.ec2api_subdomain]} + # If someone wants to pretend they're hitting the OSAPI subdomain + # on their local box, they can set FAKE_subdomain to 'api', which + # removes subdomain restrictions from the OpenStack API routes below. + if FLAGS.FAKE_subdomain == 'api': + osapidomain = {} + elif FLAGS.FAKE_subdomain == 'ec2': + ec2domain = {} mapper = routes.Mapper() - mapper.connect(None, "/v1.0/{path_info:.*}", - controller=rackspace.API()) - mapper.connect(None, "/ec2/{path_info:.*}", controller=ec2.API()) + mapper.sub_domains = True + mapper.connect("/", controller=self.osapi_versions, + conditions=osapidomain) + mapper.connect("/v1.0/{path_info:.*}", controller=openstack.API(), + conditions=osapidomain) + + mapper.connect("/", controller=self.ec2api_versions, + conditions=ec2domain) + mapper.connect("/services/{path_info:.*}", controller=ec2.API(), + conditions=ec2domain) + mapper.connect("/cloudpipe/{path_info:.*}", controller=cloudpipe.API()) + mrh = metadatarequesthandler.MetadataRequestHandler() + for s in ['/latest', + '/2009-04-04', + '/2008-09-01', + '/2008-02-01', + '/2007-12-15', + '/2007-10-10', + '/2007-08-29', + '/2007-03-01', + '/2007-01-19', + '/1.0']: + mapper.connect('%s/{path_info:.*}' % s, controller=mrh, + conditions=ec2domain) super(API, self).__init__(mapper) + + @webob.dec.wsgify + def osapi_versions(self, req): + """Respond to a request for all OpenStack API versions.""" + response = { + "versions": [ + dict(status="CURRENT", id="v1.0")]} + metadata = { + "application/xml": { + "attributes": dict(version=["status", "id"])}} + return wsgi.Serializer(req.environ, metadata).to_content_type(response) + + @webob.dec.wsgify + def ec2api_versions(self, req): + """Respond to a request for all EC2 versions.""" + # available api versions + versions = [ + '1.0', + '2007-01-19', + '2007-03-01', + '2007-08-29', + '2007-10-10', + '2007-12-15', + '2008-02-01', + '2008-09-01', + '2009-04-04', + ] + return ''.join('%s\n' % v for v in versions) + + diff --git a/nova/api/cloud.py b/nova/api/cloud.py new file mode 100644 index 000000000..57e94a17a --- /dev/null +++ b/nova/api/cloud.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Methods for API calls to control instances via AMQP. +""" + + +from nova import db +from nova import flags +from nova import rpc + +FLAGS = flags.FLAGS + + +def reboot(instance_id, context=None): + """Reboot the given instance. + + #TODO(gundlach) not actually sure what context is used for by ec2 here + -- I think we can just remove it and use None all the time. + """ + instance_ref = db.instance_get_by_internal_id(None, instance_id) + host = instance_ref['host'] + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "reboot_instance", + "args": {"context": None, + "instance_id": instance_ref['id']}}) diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py new file mode 100644 index 000000000..6d40990a8 --- /dev/null +++ b/nova/api/cloudpipe/__init__.py @@ -0,0 +1,69 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +REST API Request Handlers for CloudPipe +""" + +import logging +import urllib +import webob +import webob.dec +import webob.exc + +from nova import crypto +from nova import wsgi +from nova.auth import manager +from nova.api.ec2 import cloud + + +_log = logging.getLogger("api") +_log.setLevel(logging.DEBUG) + + +class API(wsgi.Application): + + def __init__(self): + self.controller = cloud.CloudController() + + @webob.dec.wsgify + def __call__(self, req): + if req.method == 'POST': + return self.sign_csr(req) + _log.debug("Cloudpipe path is %s" % req.path_info) + if req.path_info.endswith("/getca/"): + return self.send_root_ca(req) + return webob.exc.HTTPNotFound() + + def get_project_id_from_ip(self, ip): + # TODO(eday): This was removed with the ORM branch, fix! 
+ instance = self.controller.get_instance_by_ip(ip) + return instance['project_id'] + + def send_root_ca(self, req): + _log.debug("Getting root ca") + project_id = self.get_project_id_from_ip(req.remote_addr) + res = webob.Response() + res.headers["Content-Type"] = "text/plain" + res.body = crypto.fetch_ca(project_id) + return res + + def sign_csr(self, req): + project_id = self.get_project_id_from_ip(req.remote_addr) + cert = self.str_params['cert'] + return crypto.sign_csr(urllib.unquote(cert), project_id) diff --git a/nova/api/context.py b/nova/api/context.py new file mode 100644 index 000000000..b66cfe468 --- /dev/null +++ b/nova/api/context.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +APIRequestContext +""" + +import random + + +class APIRequestContext(object): + def __init__(self, user, project): + self.user = user + self.project = project + self.request_id = ''.join( + [random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-') + for x in xrange(20)] + ) + if user: + self.is_admin = user.is_admin() + else: + self.is_admin = False + self.read_deleted = False + + +def get_admin_context(user=None, read_deleted=False): + context_ref = APIRequestContext(user=user, project=None) + context_ref.is_admin = True + context_ref.read_deleted = read_deleted + return context_ref + diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 6eec0abf7..6e771f064 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -1,6 +1,7 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -15,28 +16,227 @@ # License for the specific language governing permissions and limitations # under the License. -""" -WSGI middleware for EC2 API controllers. 
-""" +"""Starting point for routing EC2 requests""" +import logging import routes +import webob import webob.dec +import webob.exc +from nova import exception +from nova import flags from nova import wsgi +from nova.api import context +from nova.api.ec2 import apirequest +from nova.api.ec2 import admin +from nova.api.ec2 import cloud +from nova.auth import manager -class API(wsgi.Router): - """Routes EC2 requests to the appropriate controller.""" +FLAGS = flags.FLAGS +_log = logging.getLogger("api") +_log.setLevel(logging.DEBUG) + + +class API(wsgi.Middleware): + + """Routing for all EC2 API requests.""" def __init__(self): - mapper = routes.Mapper() - mapper.connect(None, "{all:.*}", controller=self.dummy) - super(API, self).__init__(mapper) + self.application = Authenticate(Router(Authorizer(Executor()))) + + +class Authenticate(wsgi.Middleware): + + """Authenticate an EC2 request and add 'ec2.context' to WSGI environ.""" + + @webob.dec.wsgify + def __call__(self, req): + # Read request signature and access id. + try: + signature = req.params['Signature'] + access = req.params['AWSAccessKeyId'] + except: + raise webob.exc.HTTPBadRequest() + + # Make a copy of args for authentication and signature verification. + auth_params = dict(req.params) + auth_params.pop('Signature') # not part of authentication args + + # Authenticate the request. + try: + (user, project) = manager.AuthManager().authenticate( + access, + signature, + auth_params, + req.method, + req.host, + req.path) + except exception.Error, ex: + logging.debug("Authentication Failure: %s" % ex) + raise webob.exc.HTTPForbidden() + + # Authenticated! + req.environ['ec2.context'] = context.APIRequestContext(user, project) + return self.application + + +class Router(wsgi.Middleware): + + """Add ec2.'controller', .'action', and .'action_args' to WSGI environ.""" + + def __init__(self, application): + super(Router, self).__init__(application) + self.map = routes.Mapper() + self.map.connect("/{controller_name}/") + self.controllers = dict(Cloud=cloud.CloudController(), + Admin=admin.AdminController()) - @staticmethod @webob.dec.wsgify - def dummy(req): - """Temporary dummy controller.""" - msg = "dummy response -- please hook up __init__() to cloud.py instead" - return repr({'dummy': msg, - 'kwargs': repr(req.environ['wsgiorg.routing_args'][1])}) + def __call__(self, req): + # Obtain the appropriate controller and action for this request. + try: + match = self.map.match(req.path_info) + controller_name = match['controller_name'] + controller = self.controllers[controller_name] + except: + raise webob.exc.HTTPNotFound() + non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', + 'SignatureVersion', 'Version', 'Timestamp'] + args = dict(req.params) + try: + action = req.params['Action'] # raise KeyError if omitted + for non_arg in non_args: + args.pop(non_arg) # remove, but raise KeyError if omitted + except: + raise webob.exc.HTTPBadRequest() + + _log.debug('action: %s' % action) + for key, value in args.items(): + _log.debug('arg: %s\t\tval: %s' % (key, value)) + + # Success! + req.environ['ec2.controller'] = controller + req.environ['ec2.action'] = action + req.environ['ec2.action_args'] = args + return self.application + + +class Authorizer(wsgi.Middleware): + + """Authorize an EC2 API request. + + Return a 401 if ec2.controller and ec2.action in WSGI environ may not be + executed in ec2.context. 
+ """ + + def __init__(self, application): + super(Authorizer, self).__init__(application) + self.action_roles = { + 'CloudController': { + 'DescribeAvailabilityzones': ['all'], + 'DescribeRegions': ['all'], + 'DescribeSnapshots': ['all'], + 'DescribeKeyPairs': ['all'], + 'CreateKeyPair': ['all'], + 'DeleteKeyPair': ['all'], + 'DescribeSecurityGroups': ['all'], + 'AuthorizeSecurityGroupIngress': ['netadmin'], + 'RevokeSecurityGroupIngress': ['netadmin'], + 'CreateSecurityGroup': ['netadmin'], + 'DeleteSecurityGroup': ['netadmin'], + 'GetConsoleOutput': ['projectmanager', 'sysadmin'], + 'DescribeVolumes': ['projectmanager', 'sysadmin'], + 'CreateVolume': ['projectmanager', 'sysadmin'], + 'AttachVolume': ['projectmanager', 'sysadmin'], + 'DetachVolume': ['projectmanager', 'sysadmin'], + 'DescribeInstances': ['all'], + 'DescribeAddresses': ['all'], + 'AllocateAddress': ['netadmin'], + 'ReleaseAddress': ['netadmin'], + 'AssociateAddress': ['netadmin'], + 'DisassociateAddress': ['netadmin'], + 'RunInstances': ['projectmanager', 'sysadmin'], + 'TerminateInstances': ['projectmanager', 'sysadmin'], + 'RebootInstances': ['projectmanager', 'sysadmin'], + 'UpdateInstance': ['projectmanager', 'sysadmin'], + 'DeleteVolume': ['projectmanager', 'sysadmin'], + 'DescribeImages': ['all'], + 'DeregisterImage': ['projectmanager', 'sysadmin'], + 'RegisterImage': ['projectmanager', 'sysadmin'], + 'DescribeImageAttribute': ['all'], + 'ModifyImageAttribute': ['projectmanager', 'sysadmin'], + 'UpdateImage': ['projectmanager', 'sysadmin'], + }, + 'AdminController': { + # All actions have the same permission: ['none'] (the default) + # superusers will be allowed to run them + # all others will get HTTPUnauthorized. + }, + } + + @webob.dec.wsgify + def __call__(self, req): + context = req.environ['ec2.context'] + controller_name = req.environ['ec2.controller'].__class__.__name__ + action = req.environ['ec2.action'] + allowed_roles = self.action_roles[controller_name].get(action, ['none']) + if self._matches_any_role(context, allowed_roles): + return self.application + else: + raise webob.exc.HTTPUnauthorized() + + def _matches_any_role(self, context, roles): + """Return True if any role in roles is allowed in context.""" + if context.user.is_superuser(): + return True + if 'all' in roles: + return True + if 'none' in roles: + return False + return any(context.project.has_role(context.user.id, role) + for role in roles) + + +class Executor(wsgi.Application): + + """Execute an EC2 API request. + + Executes 'ec2.action' upon 'ec2.controller', passing 'ec2.context' and + 'ec2.action_args' (all variables in WSGI environ.) Returns an XML + response, or a 400 upon failure. 
+ """ + + @webob.dec.wsgify + def __call__(self, req): + context = req.environ['ec2.context'] + controller = req.environ['ec2.controller'] + action = req.environ['ec2.action'] + args = req.environ['ec2.action_args'] + + api_request = apirequest.APIRequest(controller, action) + try: + result = api_request.send(context, **args) + req.headers['Content-Type'] = 'text/xml' + return result + except exception.ApiError as ex: + + if ex.code: + return self._error(req, ex.code, ex.message) + else: + return self._error(req, type(ex).__name__, ex.message) + # TODO(vish): do something more useful with unknown exceptions + except Exception as ex: + return self._error(req, type(ex).__name__, str(ex)) + + def _error(self, req, code, message): + resp = webob.Response() + resp.status = 400 + resp.headers['Content-Type'] = 'text/xml' + resp.body = ('<?xml version="1.0"?>\n' + '<Response><Errors><Error><Code>%s</Code>' + '<Message>%s</Message></Error></Errors>' + '<RequestID>?</RequestID></Response>') % (code, message) + return resp + diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py new file mode 100644 index 000000000..36feae451 --- /dev/null +++ b/nova/api/ec2/admin.py @@ -0,0 +1,183 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Admin API controller, exposed through http via the api worker. +""" + +import base64 + +from nova import db +from nova import exception +from nova.auth import manager + + +def user_dict(user, base64_file=None): + """Convert the user object to a result dict""" + if user: + return { + 'username': user.id, + 'accesskey': user.access, + 'secretkey': user.secret, + 'file': base64_file} + else: + return {} + + +def project_dict(project): + """Convert the project object to a result dict""" + if project: + return { + 'projectname': project.id, + 'project_manager_id': project.project_manager_id, + 'description': project.description} + else: + return {} + + +def host_dict(host): + """Convert a host model object to a result dict""" + if host: + return host.state + else: + return {} + + +class AdminController(object): + """ + API Controller for users, hosts, nodes, and workers. 
+ """ + + def __str__(self): + return 'AdminController' + + def describe_user(self, _context, name, **_kwargs): + """Returns user data, including access and secret keys.""" + return user_dict(manager.AuthManager().get_user(name)) + + def describe_users(self, _context, **_kwargs): + """Returns all users - should be changed to deal with a list.""" + return {'userSet': + [user_dict(u) for u in manager.AuthManager().get_users()] } + + def register_user(self, _context, name, **_kwargs): + """Creates a new user, and returns generated credentials.""" + return user_dict(manager.AuthManager().create_user(name)) + + def deregister_user(self, _context, name, **_kwargs): + """Deletes a single user (NOT undoable.) + Should throw an exception if the user has instances, + volumes, or buckets remaining. + """ + manager.AuthManager().delete_user(name) + + return True + + def describe_roles(self, context, project_roles=True, **kwargs): + """Returns a list of allowed roles.""" + roles = manager.AuthManager().get_roles(project_roles) + return { 'roles': [{'role': r} for r in roles]} + + def describe_user_roles(self, context, user, project=None, **kwargs): + """Returns a list of roles for the given user. + Omitting project will return any global roles that the user has. + Specifying project will return only project specific roles. + """ + roles = manager.AuthManager().get_user_roles(user, project=project) + return { 'roles': [{'role': r} for r in roles]} + + def modify_user_role(self, context, user, role, project=None, + operation='add', **kwargs): + """Add or remove a role for a user and project.""" + if operation == 'add': + manager.AuthManager().add_role(user, role, project) + elif operation == 'remove': + manager.AuthManager().remove_role(user, role, project) + else: + raise exception.ApiError('operation must be add or remove') + + return True + + def generate_x509_for_user(self, _context, name, project=None, **kwargs): + """Generates and returns an x509 certificate for a single user. + Is usually called from a client that will wrap this with + access and secret key info, and return a zip file. 
+ """ + if project is None: + project = name + project = manager.AuthManager().get_project(project) + user = manager.AuthManager().get_user(name) + return user_dict(user, base64.b64encode(project.get_credentials(user))) + + def describe_project(self, context, name, **kwargs): + """Returns project data, including member ids.""" + return project_dict(manager.AuthManager().get_project(name)) + + def describe_projects(self, context, user=None, **kwargs): + """Returns all projects - should be changed to deal with a list.""" + return {'projectSet': + [project_dict(u) for u in + manager.AuthManager().get_projects(user=user)]} + + def register_project(self, context, name, manager_user, description=None, + member_users=None, **kwargs): + """Creates a new project""" + return project_dict( + manager.AuthManager().create_project( + name, + manager_user, + description=None, + member_users=None)) + + def deregister_project(self, context, name): + """Permanently deletes a project.""" + manager.AuthManager().delete_project(name) + return True + + def describe_project_members(self, context, name, **kwargs): + project = manager.AuthManager().get_project(name) + result = { + 'members': [{'member': m} for m in project.member_ids]} + return result + + def modify_project_member(self, context, user, project, operation, **kwargs): + """Add or remove a user from a project.""" + if operation =='add': + manager.AuthManager().add_to_project(user, project) + elif operation == 'remove': + manager.AuthManager().remove_from_project(user, project) + else: + raise exception.ApiError('operation must be add or remove') + return True + + # FIXME(vish): these host commands don't work yet, perhaps some of the + # required data can be retrieved from service objects? + def describe_hosts(self, _context, **_kwargs): + """Returns status info for all nodes. Includes: + * Disk Space + * Instance List + * RAM used + * CPU used + * DHCP servers running + * Iptables / bridges + """ + return {'hostSet': [host_dict(h) for h in db.host_get_all()]} + + def describe_host(self, _context, name, **_kwargs): + """Returns status info for single node.""" + return host_dict(db.host_get(name)) diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py new file mode 100644 index 000000000..a87c21fb3 --- /dev/null +++ b/nova/api/ec2/apirequest.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +APIRequest class +""" + +import logging +import re +# TODO(termie): replace minidom with etree +from xml.dom import minidom + +_log = logging.getLogger("api") +_log.setLevel(logging.DEBUG) + + +_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') + + +def _camelcase_to_underscore(str): + return _c2u.sub(r'_\1', str).lower().strip('_') + + +def _underscore_to_camelcase(str): + return ''.join([x[:1].upper() + x[1:] for x in str.split('_')]) + + +def _underscore_to_xmlcase(str): + res = _underscore_to_camelcase(str) + return res[:1].lower() + res[1:] + + +class APIRequest(object): + def __init__(self, controller, action): + self.controller = controller + self.action = action + + def send(self, context, **kwargs): + try: + method = getattr(self.controller, + _camelcase_to_underscore(self.action)) + except AttributeError: + _error = ('Unsupported API request: controller = %s,' + 'action = %s') % (self.controller, self.action) + _log.warning(_error) + # TODO: Raise custom exception, trap in apiserver, + # and reraise as 400 error. + raise Exception(_error) + + args = {} + for key, value in kwargs.items(): + parts = key.split(".") + key = _camelcase_to_underscore(parts[0]) + if len(parts) > 1: + d = args.get(key, {}) + d[parts[1]] = value + value = d + args[key] = value + + for key in args.keys(): + if isinstance(args[key], dict): + if args[key] != {} and args[key].keys()[0].isdigit(): + s = args[key].items() + s.sort() + args[key] = [v for k, v in s] + + result = method(context, **args) + return self._render_response(result, context.request_id) + + def _render_response(self, response_data, request_id): + xml = minidom.Document() + + response_el = xml.createElement(self.action + 'Response') + response_el.setAttribute('xmlns', + 'http://ec2.amazonaws.com/doc/2009-11-30/') + request_id_el = xml.createElement('requestId') + request_id_el.appendChild(xml.createTextNode(request_id)) + response_el.appendChild(request_id_el) + if(response_data == True): + self._render_dict(xml, response_el, {'return': 'true'}) + else: + self._render_dict(xml, response_el, response_data) + + xml.appendChild(response_el) + + response = xml.toxml() + xml.unlink() + _log.debug(response) + return response + + def _render_dict(self, xml, el, data): + try: + for key in data.keys(): + val = data[key] + el.appendChild(self._render_data(xml, key, val)) + except: + _log.debug(data) + raise + + def _render_data(self, xml, el_name, data): + el_name = _underscore_to_xmlcase(el_name) + data_el = xml.createElement(el_name) + + if isinstance(data, list): + for item in data: + data_el.appendChild(self._render_data(xml, 'item', item)) + elif isinstance(data, dict): + self._render_dict(xml, data_el, data) + elif hasattr(data, '__dict__'): + self._render_dict(xml, data_el, data.__dict__) + elif isinstance(data, bool): + data_el.appendChild(xml.createTextNode(str(data).lower())) + elif data != None: + data_el.appendChild(xml.createTextNode(str(data))) + + return data_el diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py new file mode 100644 index 000000000..ee45374b2 --- /dev/null +++ b/nova/api/ec2/cloud.py @@ -0,0 +1,1027 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Cloud Controller: Implementation of EC2 REST API calls, which are +dispatched to other nodes via AMQP RPC. State is via distributed +datastore. +""" + +import base64 +import datetime +import logging +import os +import time + +import IPy + +from nova import crypto +from nova import db +from nova import exception +from nova import flags +from nova import quota +from nova import rpc +from nova import utils +from nova.compute.instance_types import INSTANCE_TYPES +from nova.api import cloud +from nova.api.ec2 import images + + +FLAGS = flags.FLAGS +flags.DECLARE('storage_availability_zone', 'nova.volume.manager') + +InvalidInputException = exception.InvalidInputException + +class QuotaError(exception.ApiError): + """Quota Exceeeded""" + pass + + +def _gen_key(context, user_id, key_name): + """Generate a key + + This is a module level method because it is slow and we need to defer + it into a process pool.""" + # NOTE(vish): generating key pair is slow so check for legal + # creation before creating key_pair + try: + db.key_pair_get(context, user_id, key_name) + raise exception.Duplicate("The key_pair %s already exists" + % key_name) + except exception.NotFound: + pass + private_key, public_key, fingerprint = crypto.generate_key_pair() + key = {} + key['user_id'] = user_id + key['name'] = key_name + key['public_key'] = public_key + key['fingerprint'] = fingerprint + db.key_pair_create(context, key) + return {'private_key': private_key, 'fingerprint': fingerprint} + + +def ec2_id_to_internal_id(ec2_id): + """Convert an ec2 ID (i-[base 36 number]) to an internal id (int)""" + return int(ec2_id[2:], 36) + + +def internal_id_to_ec2_id(internal_id): + """Convert an internal ID (int) to an ec2 ID (i-[base 36 number])""" + digits = [] + while internal_id != 0: + internal_id, remainder = divmod(internal_id, 36) + digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) + return "i-%s" % ''.join(reversed(digits)) + + +class CloudController(object): + """ CloudController provides the critical dispatch between + inbound API calls through the endpoint and messages + sent to the other nodes. +""" + def __init__(self): + self.network_manager = utils.import_object(FLAGS.network_manager) + self.setup() + + def __str__(self): + return 'CloudController' + + def setup(self): + """ Ensure the keychains and folders exist. 
""" + # FIXME(ja): this should be moved to a nova-manage command, + # if not setup throw exceptions instead of running + # Create keys folder, if it doesn't exist + if not os.path.exists(FLAGS.keys_path): + os.makedirs(FLAGS.keys_path) + # Gen root CA, if we don't have one + root_ca_path = os.path.join(FLAGS.ca_path, FLAGS.ca_file) + if not os.path.exists(root_ca_path): + start = os.getcwd() + os.chdir(FLAGS.ca_path) + # TODO(vish): Do this with M2Crypto instead + utils.runthis("Generating root CA: %s", "sh genrootca.sh") + os.chdir(start) + + def _get_mpi_data(self, project_id): + result = {} + for instance in db.instance_get_all_by_project(None, project_id): + if instance['fixed_ip']: + line = '%s slots=%d' % (instance['fixed_ip']['address'], + INSTANCE_TYPES[instance['instance_type']]['vcpus']) + key = str(instance['key_name']) + if key in result: + result[key].append(line) + else: + result[key] = [line] + return result + + def _trigger_refresh_security_group(self, security_group): + nodes = set([instance['host'] for instance in security_group.instances + if instance['host'] is not None]) + for node in nodes: + rpc.call('%s.%s' % (FLAGS.compute_topic, node), + { "method": "refresh_security_group", + "args": { "context": None, + "security_group_id": security_group.id}}) + + def get_metadata(self, address): + instance_ref = db.fixed_ip_get_instance(None, address) + if instance_ref is None: + return None + mpi = self._get_mpi_data(instance_ref['project_id']) + if instance_ref['key_name']: + keys = { + '0': { + '_name': instance_ref['key_name'], + 'openssh-key': instance_ref['key_data'] + } + } + else: + keys = '' + hostname = instance_ref['hostname'] + floating_ip = db.instance_get_floating_address(None, + instance_ref['id']) + data = { + 'user-data': base64.b64decode(instance_ref['user_data']), + 'meta-data': { + 'ami-id': instance_ref['image_id'], + 'ami-launch-index': instance_ref['launch_index'], + 'ami-manifest-path': 'FIXME', + 'block-device-mapping': { # TODO(vish): replace with real data + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': '/dev/sda1', + 'swap': 'sda3' + }, + 'hostname': hostname, + 'instance-action': 'none', + 'instance-id': internal_id_to_ec2_id(instance_ref['internal_id']), + 'instance-type': instance_ref['instance_type'], + 'local-hostname': hostname, + 'local-ipv4': address, + 'kernel-id': instance_ref['kernel_id'], + 'placement': { + 'availability-zone': 'nova' # TODO(vish): real zone + }, + 'public-hostname': hostname, + 'public-ipv4': floating_ip or '', + 'public-keys': keys, + 'ramdisk-id': instance_ref['ramdisk_id'], + 'reservation-id': instance_ref['reservation_id'], + 'security-groups': '', + 'mpi': mpi + } + } + if False: # TODO(vish): store ancestor ids + data['ancestor-ami-ids'] = [] + if False: # TODO(vish): store product codes + data['product-codes'] = [] + return data + + def describe_availability_zones(self, context, **kwargs): + return {'availabilityZoneInfo': [{'zoneName': 'nova', + 'zoneState': 'available'}]} + + def describe_regions(self, context, region_name=None, **kwargs): + if FLAGS.region_list: + regions = [] + for region in FLAGS.region_list: + name, _sep, url = region.partition('=') + regions.append({'regionName': name, + 'regionEndpoint': url}) + else: + regions = [{'regionName': 'nova', + 'regionEndpoint': FLAGS.ec2_url}] + if region_name: + regions = [r for r in regions if r['regionName'] in region_name] + return {'regionInfo': regions } + + def describe_snapshots(self, + context, + snapshot_id=None, + owner=None, + 
restorable_by=None, + **kwargs): + return {'snapshotSet': [{'snapshotId': 'fixme', + 'volumeId': 'fixme', + 'status': 'fixme', + 'startTime': 'fixme', + 'progress': 'fixme', + 'ownerId': 'fixme', + 'volumeSize': 0, + 'description': 'fixme'}]} + + def describe_key_pairs(self, context, key_name=None, **kwargs): + key_pairs = db.key_pair_get_all_by_user(context, context.user.id) + if not key_name is None: + key_pairs = [x for x in key_pairs if x['name'] in key_name] + + result = [] + for key_pair in key_pairs: + # filter out the vpn keys + suffix = FLAGS.vpn_key_suffix + if context.user.is_admin() or not key_pair['name'].endswith(suffix): + result.append({ + 'keyName': key_pair['name'], + 'keyFingerprint': key_pair['fingerprint'], + }) + + return {'keypairsSet': result} + + def create_key_pair(self, context, key_name, **kwargs): + data = _gen_key(None, context.user.id, key_name) + return {'keyName': key_name, + 'keyFingerprint': data['fingerprint'], + 'keyMaterial': data['private_key']} + # TODO(vish): when context is no longer an object, pass it here + + def delete_key_pair(self, context, key_name, **kwargs): + try: + db.key_pair_destroy(context, context.user.id, key_name) + except exception.NotFound: + # aws returns true even if the key doesn't exist + pass + return True + + def describe_security_groups(self, context, group_name=None, **kwargs): + self._ensure_default_security_group(context) + if context.user.is_admin(): + groups = db.security_group_get_all(context) + else: + groups = db.security_group_get_by_project(context, + context.project.id) + groups = [self._format_security_group(context, g) for g in groups] + if not group_name is None: + groups = [g for g in groups if g.name in group_name] + + return {'securityGroupInfo': groups } + + def _format_security_group(self, context, group): + g = {} + g['groupDescription'] = group.description + g['groupName'] = group.name + g['ownerId'] = group.project_id + g['ipPermissions'] = [] + for rule in group.rules: + r = {} + r['ipProtocol'] = rule.protocol + r['fromPort'] = rule.from_port + r['toPort'] = rule.to_port + r['groups'] = [] + r['ipRanges'] = [] + if rule.group_id: + source_group = db.security_group_get(context, rule.group_id) + r['groups'] += [{'groupName': source_group.name, + 'userId': source_group.project_id}] + else: + r['ipRanges'] += [{'cidrIp': rule.cidr}] + g['ipPermissions'] += [r] + return g + + + def _authorize_revoke_rule_args_to_dict(self, context, + to_port=None, from_port=None, + ip_protocol=None, cidr_ip=None, + user_id=None, + source_security_group_name=None, + source_security_group_owner_id=None): + + values = {} + + if source_security_group_name: + source_project_id = self._get_source_project_id(context, + source_security_group_owner_id) + + source_security_group = \ + db.security_group_get_by_name(context, + source_project_id, + source_security_group_name) + values['group_id'] = source_security_group['id'] + elif cidr_ip: + # If this fails, it throws an exception. This is what we want. 
+ IPy.IP(cidr_ip) + values['cidr'] = cidr_ip + else: + values['cidr'] = '0.0.0.0/0' + + if ip_protocol and from_port and to_port: + from_port = int(from_port) + to_port = int(to_port) + ip_protocol = str(ip_protocol) + + if ip_protocol.upper() not in ['TCP','UDP','ICMP']: + raise InvalidInputException('%s is not a valid ipProtocol' % + (ip_protocol,)) + if ((min(from_port, to_port) < -1) or + (max(from_port, to_port) > 65535)): + raise InvalidInputException('Invalid port range') + + values['protocol'] = ip_protocol + values['from_port'] = from_port + values['to_port'] = to_port + else: + # If cidr based filtering, protocol and ports are mandatory + if 'cidr' in values: + return None + + return values + + + def _security_group_rule_exists(self, security_group, values): + """Indicates whether the specified rule values are already + defined in the given security group. + """ + for rule in security_group.rules: + if 'group_id' in values: + if rule['group_id'] == values['group_id']: + return True + else: + is_duplicate = True + for key in ('cidr', 'from_port', 'to_port', 'protocol'): + if rule[key] != values[key]: + is_duplicate = False + break + if is_duplicate: + return True + return False + + + def revoke_security_group_ingress(self, context, group_name, **kwargs): + self._ensure_default_security_group(context) + security_group = db.security_group_get_by_name(context, + context.project.id, + group_name) + + criteria = self._authorize_revoke_rule_args_to_dict(context, **kwargs) + if criteria == None: + raise exception.ApiError("No rule for the specified parameters.") + + for rule in security_group.rules: + match = True + for (k,v) in criteria.iteritems(): + if getattr(rule, k, False) != v: + match = False + if match: + db.security_group_rule_destroy(context, rule['id']) + self._trigger_refresh_security_group(security_group) + return True + raise exception.ApiError("No rule for the specified parameters.") + + # TODO(soren): This has only been tested with Boto as the client. + # Unfortunately, it seems Boto is using an old API + # for these operations, so support for newer API versions + # is sketchy. + def authorize_security_group_ingress(self, context, group_name, **kwargs): + self._ensure_default_security_group(context) + security_group = db.security_group_get_by_name(context, + context.project.id, + group_name) + + values = self._authorize_revoke_rule_args_to_dict(context, **kwargs) + values['parent_group_id'] = security_group.id + + if self._security_group_rule_exists(security_group, values): + raise exception.ApiError('This rule already exists in group %s' % + group_name) + + security_group_rule = db.security_group_rule_create(context, values) + + self._trigger_refresh_security_group(security_group) + + return True + + + def _get_source_project_id(self, context, source_security_group_owner_id): + if source_security_group_owner_id: + # Parse user:project for source group. + source_parts = source_security_group_owner_id.split(':') + + # If no project name specified, assume it's same as user name. + # Since we're looking up by project name, the user name is not + # used here. It's only read for EC2 API compatibility. 
+ if len(source_parts) == 2: + source_project_id = source_parts[1] + else: + source_project_id = source_parts[0] + else: + source_project_id = context.project.id + + return source_project_id + + + def create_security_group(self, context, group_name, group_description): + self._ensure_default_security_group(context) + if db.security_group_exists(context, context.project.id, group_name): + raise exception.ApiError('group %s already exists' % group_name) + + group = {'user_id' : context.user.id, + 'project_id': context.project.id, + 'name': group_name, + 'description': group_description} + group_ref = db.security_group_create(context, group) + + return {'securityGroupSet': [self._format_security_group(context, + group_ref)]} + + + def delete_security_group(self, context, group_name, **kwargs): + security_group = db.security_group_get_by_name(context, + context.project.id, + group_name) + db.security_group_destroy(context, security_group.id) + return True + + + def get_console_output(self, context, instance_id, **kwargs): + # instance_id is passed in as a list of instances + ec2_id = instance_id[0] + internal_id = ec2_id_to_internal_id(ec2_id) + instance_ref = db.instance_get_by_internal_id(context, internal_id) + output = rpc.call('%s.%s' % (FLAGS.compute_topic, + instance_ref['host']), + { "method" : "get_console_output", + "args" : { "context": None, + "instance_id": instance_ref['id']}}) + + now = datetime.datetime.utcnow() + return { "InstanceId" : ec2_id, + "Timestamp" : now, + "output" : base64.b64encode(output) } + + def describe_volumes(self, context, **kwargs): + if context.user.is_admin(): + volumes = db.volume_get_all(context) + else: + volumes = db.volume_get_all_by_project(context, context.project.id) + + volumes = [self._format_volume(context, v) for v in volumes] + + return {'volumeSet': volumes} + + def _format_volume(self, context, volume): + v = {} + v['volumeId'] = volume['ec2_id'] + v['status'] = volume['status'] + v['size'] = volume['size'] + v['availabilityZone'] = volume['availability_zone'] + v['createTime'] = volume['created_at'] + if context.user.is_admin(): + v['status'] = '%s (%s, %s, %s, %s)' % ( + volume['status'], + volume['user_id'], + volume['host'], + volume['instance_id'], + volume['mountpoint']) + if volume['attach_status'] == 'attached': + v['attachmentSet'] = [{'attachTime': volume['attach_time'], + 'deleteOnTermination': False, + 'device': volume['mountpoint'], + 'instanceId': volume['instance_id'], + 'status': 'attached', + 'volume_id': volume['ec2_id']}] + else: + v['attachmentSet'] = [{}] + + v['display_name'] = volume['display_name'] + v['display_description'] = volume['display_description'] + return v + + def create_volume(self, context, size, **kwargs): + # check quota + if quota.allowed_volumes(context, 1, size) < 1: + logging.warn("Quota exceeeded for %s, tried to create %sG volume", + context.project.id, size) + raise QuotaError("Volume quota exceeded. 
You cannot " + "create a volume of size %s" % + size) + vol = {} + vol['size'] = size + vol['user_id'] = context.user.id + vol['project_id'] = context.project.id + vol['availability_zone'] = FLAGS.storage_availability_zone + vol['status'] = "creating" + vol['attach_status'] = "detached" + vol['display_name'] = kwargs.get('display_name') + vol['display_description'] = kwargs.get('display_description') + volume_ref = db.volume_create(context, vol) + + rpc.cast(FLAGS.scheduler_topic, + {"method": "create_volume", + "args": {"context": None, + "topic": FLAGS.volume_topic, + "volume_id": volume_ref['id']}}) + + return {'volumeSet': [self._format_volume(context, volume_ref)]} + + + def attach_volume(self, context, volume_id, instance_id, device, **kwargs): + volume_ref = db.volume_get_by_ec2_id(context, volume_id) + # TODO(vish): abstract status checking? + if volume_ref['status'] != "available": + raise exception.ApiError("Volume status must be available") + if volume_ref['attach_status'] == "attached": + raise exception.ApiError("Volume is already attached") + internal_id = ec2_id_to_internal_id(instance_id) + instance_ref = db.instance_get_by_internal_id(context, internal_id) + host = instance_ref['host'] + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "attach_volume", + "args": {"context": None, + "volume_id": volume_ref['id'], + "instance_id": instance_ref['id'], + "mountpoint": device}}) + return {'attachTime': volume_ref['attach_time'], + 'device': volume_ref['mountpoint'], + 'instanceId': instance_ref['id'], + 'requestId': context.request_id, + 'status': volume_ref['attach_status'], + 'volumeId': volume_ref['id']} + + def detach_volume(self, context, volume_id, **kwargs): + volume_ref = db.volume_get_by_ec2_id(context, volume_id) + instance_ref = db.volume_get_instance(context, volume_ref['id']) + if not instance_ref: + raise exception.ApiError("Volume isn't attached to anything!") + # TODO(vish): abstract status checking? 
+ if volume_ref['status'] == "available": + raise exception.ApiError("Volume is already detached") + try: + host = instance_ref['host'] + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "detach_volume", + "args": {"context": None, + "instance_id": instance_ref['id'], + "volume_id": volume_ref['id']}}) + except exception.NotFound: + # If the instance doesn't exist anymore, + # then we need to call detach blind + db.volume_detached(context) + internal_id = instance_ref['internal_id'] + ec2_id = internal_id_to_ec2_id(internal_id) + return {'attachTime': volume_ref['attach_time'], + 'device': volume_ref['mountpoint'], + 'instanceId': internal_id, + 'requestId': context.request_id, + 'status': volume_ref['attach_status'], + 'volumeId': volume_ref['id']} + + def _convert_to_set(self, lst, label): + if lst == None or lst == []: + return None + if not isinstance(lst, list): + lst = [lst] + return [{label: x} for x in lst] + + def update_volume(self, context, volume_id, **kwargs): + updatable_fields = ['display_name', 'display_description'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + if changes: + db.volume_update(context, volume_id, kwargs) + return True + + def describe_instances(self, context, **kwargs): + return self._format_describe_instances(context) + + def _format_describe_instances(self, context): + return { 'reservationSet': self._format_instances(context) } + + def _format_run_instances(self, context, reservation_id): + i = self._format_instances(context, reservation_id) + assert len(i) == 1 + return i[0] + + def _format_instances(self, context, reservation_id=None): + reservations = {} + if reservation_id: + instances = db.instance_get_all_by_reservation(context, + reservation_id) + else: + if context.user.is_admin(): + instances = db.instance_get_all(context) + else: + instances = db.instance_get_all_by_project(context, + context.project.id) + for instance in instances: + if not context.user.is_admin(): + if instance['image_id'] == FLAGS.vpn_image_id: + continue + i = {} + internal_id = instance['internal_id'] + ec2_id = internal_id_to_ec2_id(internal_id) + i['instanceId'] = ec2_id + i['imageId'] = instance['image_id'] + i['instanceState'] = { + 'code': instance['state'], + 'name': instance['state_description'] + } + fixed_addr = None + floating_addr = None + if instance['fixed_ip']: + fixed_addr = instance['fixed_ip']['address'] + if instance['fixed_ip']['floating_ips']: + fixed = instance['fixed_ip'] + floating_addr = fixed['floating_ips'][0]['address'] + i['privateDnsName'] = fixed_addr + i['publicDnsName'] = floating_addr + i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] + i['keyName'] = instance['key_name'] + if context.user.is_admin(): + i['keyName'] = '%s (%s, %s)' % (i['keyName'], + instance['project_id'], + instance['host']) + i['productCodesSet'] = self._convert_to_set([], 'product_codes') + i['instanceType'] = instance['instance_type'] + i['launchTime'] = instance['created_at'] + i['amiLaunchIndex'] = instance['launch_index'] + i['displayName'] = instance['display_name'] + i['displayDescription'] = instance['display_description'] + if not reservations.has_key(instance['reservation_id']): + r = {} + r['reservationId'] = instance['reservation_id'] + r['ownerId'] = instance['project_id'] + r['groupSet'] = self._convert_to_set([], 'groups') + r['instancesSet'] = [] + reservations[instance['reservation_id']] = r + reservations[instance['reservation_id']]['instancesSet'].append(i) + + 
return list(reservations.values()) + + def describe_addresses(self, context, **kwargs): + return self.format_addresses(context) + + def format_addresses(self, context): + addresses = [] + if context.user.is_admin(): + iterator = db.floating_ip_get_all(context) + else: + iterator = db.floating_ip_get_all_by_project(context, + context.project.id) + for floating_ip_ref in iterator: + address = floating_ip_ref['address'] + instance_id = None + if (floating_ip_ref['fixed_ip'] + and floating_ip_ref['fixed_ip']['instance']): + internal_id = floating_ip_ref['fixed_ip']['instance']['ec2_id'] + ec2_id = internal_id_to_ec2_id(internal_id) + address_rv = {'public_ip': address, + 'instance_id': ec2_id} + if context.user.is_admin(): + details = "%s (%s)" % (address_rv['instance_id'], + floating_ip_ref['project_id']) + address_rv['instance_id'] = details + addresses.append(address_rv) + return {'addressesSet': addresses} + + def allocate_address(self, context, **kwargs): + # check quota + if quota.allowed_floating_ips(context, 1) < 1: + logging.warn("Quota exceeeded for %s, tried to allocate address", + context.project.id) + raise QuotaError("Address quota exceeded. You cannot " + "allocate any more addresses") + network_topic = self._get_network_topic(context) + public_ip = rpc.call(network_topic, + {"method": "allocate_floating_ip", + "args": {"context": None, + "project_id": context.project.id}}) + return {'addressSet': [{'publicIp': public_ip}]} + + def release_address(self, context, public_ip, **kwargs): + # NOTE(vish): Should we make sure this works? + floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) + network_topic = self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "deallocate_floating_ip", + "args": {"context": None, + "floating_address": floating_ip_ref['address']}}) + return {'releaseResponse': ["Address released."]} + + def associate_address(self, context, ec2_id, public_ip, **kwargs): + internal_id = ec2_id_to_internal_id(ec2_id) + instance_ref = db.instance_get_by_internal_id(context, internal_id) + fixed_address = db.instance_get_fixed_address(context, + instance_ref['id']) + floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) + network_topic = self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "associate_floating_ip", + "args": {"context": None, + "floating_address": floating_ip_ref['address'], + "fixed_address": fixed_address}}) + return {'associateResponse': ["Address associated."]} + + def disassociate_address(self, context, public_ip, **kwargs): + floating_ip_ref = db.floating_ip_get_by_address(context, public_ip) + network_topic = self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "disassociate_floating_ip", + "args": {"context": None, + "floating_address": floating_ip_ref['address']}}) + return {'disassociateResponse': ["Address disassociated."]} + + def _get_network_topic(self, context): + """Retrieves the network host for a project""" + network_ref = self.network_manager.get_network(context) + host = network_ref['host'] + if not host: + host = rpc.call(FLAGS.network_topic, + {"method": "set_network_host", + "args": {"context": None, + "network_id": network_ref['id']}}) + return db.queue_get_for(context, FLAGS.network_topic, host) + + def _ensure_default_security_group(self, context): + try: + db.security_group_get_by_name(context, + context.project.id, + 'default') + except exception.NotFound: + values = { 'name' : 'default', + 'description' : 'default', + 'user_id' : 
context.user.id, + 'project_id' : context.project.id } + group = db.security_group_create(context, values) + + def run_instances(self, context, **kwargs): + instance_type = kwargs.get('instance_type', 'm1.small') + if instance_type not in INSTANCE_TYPES: + raise exception.ApiError("Unknown instance type: %s", + instance_type) + # check quota + max_instances = int(kwargs.get('max_count', 1)) + min_instances = int(kwargs.get('min_count', max_instances)) + num_instances = quota.allowed_instances(context, + max_instances, + instance_type) + if num_instances < min_instances: + logging.warn("Quota exceeeded for %s, tried to run %s instances", + context.project.id, min_instances) + raise QuotaError("Instance quota exceeded. You can only " + "run %s more instances of this type." % + num_instances, "InstanceLimitExceeded") + # make sure user can access the image + # vpn image is private so it doesn't show up on lists + vpn = kwargs['image_id'] == FLAGS.vpn_image_id + + if not vpn: + image = images.get(context, kwargs['image_id']) + + # FIXME(ja): if image is vpn, this breaks + # get defaults from imagestore + image_id = image['imageId'] + kernel_id = image.get('kernelId', FLAGS.default_kernel) + ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk) + + # API parameters overrides of defaults + kernel_id = kwargs.get('kernel_id', kernel_id) + ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id) + + if kernel_id == str(FLAGS.null_kernel): + kernel_id = None + ramdisk_id = None + + # make sure we have access to kernel and ramdisk + if kernel_id: + images.get(context, kernel_id) + if ramdisk_id: + images.get(context, ramdisk_id) + + logging.debug("Going to run %s instances...", num_instances) + launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + key_data = None + if kwargs.has_key('key_name'): + key_pair_ref = db.key_pair_get(context, + context.user.id, + kwargs['key_name']) + key_data = key_pair_ref['public_key'] + + security_group_arg = kwargs.get('security_group', ["default"]) + if not type(security_group_arg) is list: + security_group_arg = [security_group_arg] + + security_groups = [] + self._ensure_default_security_group(context) + for security_group_name in security_group_arg: + group = db.security_group_get_by_name(context, + context.project.id, + security_group_name) + security_groups.append(group['id']) + + reservation_id = utils.generate_uid('r') + base_options = {} + base_options['state_description'] = 'scheduling' + base_options['image_id'] = image_id + base_options['kernel_id'] = kernel_id or '' + base_options['ramdisk_id'] = ramdisk_id or '' + base_options['reservation_id'] = reservation_id + base_options['key_data'] = key_data + base_options['key_name'] = kwargs.get('key_name', None) + base_options['user_id'] = context.user.id + base_options['project_id'] = context.project.id + base_options['user_data'] = kwargs.get('user_data', '') + + base_options['display_name'] = kwargs.get('display_name') + base_options['display_description'] = kwargs.get('display_description') + + type_data = INSTANCE_TYPES[instance_type] + base_options['instance_type'] = instance_type + base_options['memory_mb'] = type_data['memory_mb'] + base_options['vcpus'] = type_data['vcpus'] + base_options['local_gb'] = type_data['local_gb'] + + for num in range(num_instances): + instance_ref = db.instance_create(context, base_options) + inst_id = instance_ref['id'] + + for security_group_id in security_groups: + db.instance_add_security_group(context, inst_id, + security_group_id) + + inst = {} + 
inst['mac_address'] = utils.generate_mac() + inst['launch_index'] = num + internal_id = instance_ref['internal_id'] + ec2_id = internal_id_to_ec2_id(internal_id) + inst['hostname'] = ec2_id + db.instance_update(context, inst_id, inst) + # TODO(vish): This probably should be done in the scheduler + # or in compute as a call. The network should be + # allocated after the host is assigned and setup + # can happen at the same time. + address = self.network_manager.allocate_fixed_ip(context, + inst_id, + vpn) + network_topic = self._get_network_topic(context) + rpc.call(network_topic, + {"method": "setup_fixed_ip", + "args": {"context": None, + "address": address}}) + + rpc.cast(FLAGS.scheduler_topic, + {"method": "run_instance", + "args": {"context": None, + "topic": FLAGS.compute_topic, + "instance_id": inst_id}}) + logging.debug("Casting to scheduler for %s/%s's instance %s" % + (context.project.name, context.user.name, inst_id)) + return self._format_run_instances(context, reservation_id) + + + def terminate_instances(self, context, instance_id, **kwargs): + """Terminate each instance in instance_id, which is a list of ec2 ids. + + instance_id is a kwarg so its name cannot be modified. + """ + ec2_id_list = instance_id + logging.debug("Going to start terminating instances") + for id_str in ec2_id_list: + internal_id = ec2_id_to_internal_id(id_str) + logging.debug("Going to try and terminate %s" % id_str) + try: + instance_ref = db.instance_get_by_internal_id(context, + internal_id) + except exception.NotFound: + logging.warning("Instance %s was not found during terminate", + id_str) + continue + + if (instance_ref['state_description'] == 'terminating'): + logging.warning("Instance %s is already being terminated", + id_str) + continue + now = datetime.datetime.utcnow() + db.instance_update(context, + instance_ref['id'], + {'state_description': 'terminating', + 'state': 0, + 'terminated_at': now}) + # FIXME(ja): where should network deallocate occur? + address = db.instance_get_floating_address(context, + instance_ref['id']) + if address: + logging.debug("Disassociating address %s" % address) + # NOTE(vish): Right now we don't really care if the ip is + # disassociated. We may need to worry about + # checking this later. Perhaps in the scheduler? + network_topic = self._get_network_topic(context) + rpc.cast(network_topic, + {"method": "disassociate_floating_ip", + "args": {"context": None, + "floating_address": address}}) + + address = db.instance_get_fixed_address(context, + instance_ref['id']) + if address: + logging.debug("Deallocating address %s" % address) + # NOTE(vish): Currently, nothing needs to be done on the + # network node until release. If this changes, + # we will need to cast here. 
+ self.network_manager.deallocate_fixed_ip(context, address) + + host = instance_ref['host'] + if host: + rpc.cast(db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "terminate_instance", + "args": {"context": None, + "instance_id": instance_ref['id']}}) + else: + db.instance_destroy(context, instance_ref['id']) + return True + + def reboot_instances(self, context, instance_id, **kwargs): + """instance_id is a list of instance ids""" + for id_str in instance_id: + cloud.reboot(id_str, context=context) + return True + + def update_instance(self, context, ec2_id, **kwargs): + updatable_fields = ['display_name', 'display_description'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + if changes: + db_context = {} + internal_id = ec2_id_to_internal_id(ec2_id) + inst = db.instance_get_by_internal_id(db_context, internal_id) + db.instance_update(db_context, inst['id'], changes) + return True + + def delete_volume(self, context, volume_id, **kwargs): + # TODO: return error if not authorized + volume_ref = db.volume_get_by_ec2_id(context, volume_id) + if volume_ref['status'] != "available": + raise exception.ApiError("Volume status must be available") + now = datetime.datetime.utcnow() + db.volume_update(context, volume_ref['id'], {'status': 'deleting', + 'terminated_at': now}) + host = volume_ref['host'] + rpc.cast(db.queue_get_for(context, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"context": None, + "volume_id": volume_ref['id']}}) + return True + + def describe_images(self, context, image_id=None, **kwargs): + # The objectstore does its own authorization for describe + imageSet = images.list(context, image_id) + return {'imagesSet': imageSet} + + def deregister_image(self, context, image_id, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? + images.deregister(context, image_id) + return {'imageId': image_id} + + def register_image(self, context, image_location=None, **kwargs): + # FIXME: should the objectstore be doing these authorization checks? + if image_location is None and kwargs.has_key('name'): + image_location = kwargs['name'] + image_id = images.register(context, image_location) + logging.debug("Registered %s as %s" % (image_location, image_id)) + return {'imageId': image_id} + + def describe_image_attribute(self, context, image_id, attribute, **kwargs): + if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + try: + image = images.list(context, image_id)[0] + except IndexError: + raise exception.ApiError('invalid id: %s' % image_id) + result = {'image_id': image_id, 'launchPermission': []} + if image['isPublic']: + result['launchPermission'].append({'group': 'all'}) + return result + + def modify_image_attribute(self, context, image_id, attribute, operation_type, **kwargs): + # TODO(devcamcar): Support users and groups other than 'all'.
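+ # NOTE: only the 'launchPermission' attribute and the 'all' group are + # handled below; images.modify() presumably just toggles the image's + # public flag in the objectstore.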
+ if attribute != 'launchPermission': + raise exception.ApiError('attribute not supported: %s' % attribute) + if not 'user_group' in kwargs: + raise exception.ApiError('user or group not specified') + if len(kwargs['user_group']) != 1 or kwargs['user_group'][0] != 'all': + raise exception.ApiError('only group "all" is supported') + if not operation_type in ['add', 'remove']: + raise exception.ApiError('operation_type must be add or remove') + return images.modify(context, image_id, operation_type) + + def update_image(self, context, image_id, **kwargs): + result = images.update(context, image_id, dict(kwargs)) + return result diff --git a/nova/api/ec2/images.py b/nova/api/ec2/images.py new file mode 100644 index 000000000..f0a43dad6 --- /dev/null +++ b/nova/api/ec2/images.py @@ -0,0 +1,119 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Proxy AMI-related calls from the cloud controller, to the running +objectstore service. +""" + +import json +import urllib + +import boto.s3.connection + +from nova import exception +from nova import flags +from nova import utils +from nova.auth import manager + + +FLAGS = flags.FLAGS + + +def modify(context, image_id, operation): + conn(context).make_request( + method='POST', + bucket='_images', + query_args=qs({'image_id': image_id, 'operation': operation})) + + return True + +def update(context, image_id, attributes): + """update an image's attributes / info.json""" + attributes.update({"image_id": image_id}) + conn(context).make_request( + method='POST', + bucket='_images', + query_args=qs(attributes)) + return True + +def register(context, image_location): + """ rpc call to register a new image based on a manifest """ + + image_id = utils.generate_uid('ami') + conn(context).make_request( + method='PUT', + bucket='_images', + query_args=qs({'image_location': image_location, + 'image_id': image_id})) + + return image_id + +def list(context, filter_list=None): + """ return a list of all images that a user can see + + optionally filtered by a list of image_id """ + + if FLAGS.connection_type == 'fake': + return [{ 'imageId' : 'bar'}] + + # FIXME: send along the list of only_images to check for + response = conn(context).make_request( + method='GET', + bucket='_images') + + result = json.loads(response.read()) + if filter_list is not None: + return [i for i in result if i['imageId'] in filter_list] + return result + +def get(context, image_id): + """return an image object if the context has permissions""" + result = list(context, [image_id]) + if not result: + raise exception.NotFound('Image %s could not be found' % image_id) + image = result[0] + return image + + +def deregister(context, image_id): + """ unregister an image """ + conn(context).make_request( + method='DELETE', + bucket='_images', + query_args=qs({'image_id': image_id})) + + +def 
conn(context): + access = manager.AuthManager().get_access_key(context.user, + context.project) + secret = str(context.user.secret) + calling = boto.s3.connection.OrdinaryCallingFormat() + return boto.s3.connection.S3Connection(aws_access_key_id=access, + aws_secret_access_key=secret, + is_secure=False, + calling_format=calling, + port=FLAGS.s3_port, + host=FLAGS.s3_host) + + +def qs(params): + pairs = [] + for key in params.keys(): + pairs.append(key + '=' + urllib.quote(params[key])) + return '&'.join(pairs) diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py new file mode 100644 index 000000000..08a8040ca --- /dev/null +++ b/nova/api/ec2/metadatarequesthandler.py @@ -0,0 +1,73 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Metadata request handler.""" + +import logging + +import webob.dec +import webob.exc + +from nova.api.ec2 import cloud + + +class MetadataRequestHandler(object): + + """Serve metadata from the EC2 API.""" + + def print_data(self, data): + if isinstance(data, dict): + output = '' + for key in data: + if key == '_name': + continue + output += key + if isinstance(data[key], dict): + if '_name' in data[key]: + output += '=' + str(data[key]['_name']) + else: + output += '/' + output += '\n' + return output[:-1] # cut off last \n + elif isinstance(data, list): + return '\n'.join(data) + else: + return str(data) + + def lookup(self, path, data): + items = path.split('/') + for item in items: + if item: + if not isinstance(data, dict): + return data + if not item in data: + return None + data = data[item] + return data + + @webob.dec.wsgify + def __call__(self, req): + cc = cloud.CloudController() + meta_data = cc.get_metadata(req.remote_addr) + if meta_data is None: + logging.error('Failed to get metadata for ip: %s' % req.remote_addr) + raise webob.exc.HTTPNotFound() + data = self.lookup(req.path_info, meta_data) + if data is None: + raise webob.exc.HTTPNotFound() + return self.print_data(data) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py new file mode 100644 index 000000000..5e81ba2bd --- /dev/null +++ b/nova/api/openstack/__init__.py @@ -0,0 +1,190 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +WSGI middleware for OpenStack API controllers. +""" + +import json +import time + +import routes +import webob.dec +import webob.exc +import webob + +from nova import flags +from nova import utils +from nova import wsgi +from nova.api.openstack import faults +from nova.api.openstack import backup_schedules +from nova.api.openstack import flavors +from nova.api.openstack import images +from nova.api.openstack import ratelimiting +from nova.api.openstack import servers +from nova.api.openstack import sharedipgroups +from nova.auth import manager + + +FLAGS = flags.FLAGS +flags.DEFINE_string('nova_api_auth', + 'nova.api.openstack.auth.BasicApiAuthManager', + 'The auth mechanism to use for the OpenStack API implementation') + +class API(wsgi.Middleware): + """WSGI entry point for all OpenStack API requests.""" + + def __init__(self): + app = AuthMiddleware(RateLimitingMiddleware(APIRouter())) + super(API, self).__init__(app) + +class AuthMiddleware(wsgi.Middleware): + """Authorize the OpenStack API request or return an HTTP Unauthorized response.""" + + def __init__(self, application): + self.auth_driver = utils.import_class(FLAGS.nova_api_auth)() + super(AuthMiddleware, self).__init__(application) + + @webob.dec.wsgify + def __call__(self, req): + if not req.headers.has_key("X-Auth-Token"): + return self.auth_driver.authenticate(req) + + user = self.auth_driver.authorize_token(req.headers["X-Auth-Token"]) + + if not user: + return faults.Fault(webob.exc.HTTPUnauthorized()) + + if not req.environ.has_key('nova.context'): + req.environ['nova.context'] = {} + req.environ['nova.context']['user'] = user + return self.application + +class RateLimitingMiddleware(wsgi.Middleware): + """Rate limit incoming requests according to the OpenStack rate limits.""" + + def __init__(self, application, service_host=None): + """Create a rate limiting middleware that wraps the given application. + + By default, rate counters are stored in memory. If service_host is + specified, the middleware instead relies on the ratelimiting.WSGIApp + at the given host+port to keep rate counters. + """ + super(RateLimitingMiddleware, self).__init__(application) + if not service_host: + #TODO(gundlach): These limits were based on limitations of Cloud + #Servers. We should revisit them in Nova. + self.limiter = ratelimiting.Limiter(limits={ + 'DELETE': (100, ratelimiting.PER_MINUTE), + 'PUT': (10, ratelimiting.PER_MINUTE), + 'POST': (10, ratelimiting.PER_MINUTE), + 'POST servers': (50, ratelimiting.PER_DAY), + 'GET changes-since': (3, ratelimiting.PER_MINUTE), + }) + else: + self.limiter = ratelimiting.WSGIAppProxy(service_host) + + @webob.dec.wsgify + def __call__(self, req): + """Rate limit the request. + + If the request should be rate limited, return a 413 status with a + Retry-After header giving the time when the request would succeed. + """ + username = req.headers['X-Auth-User'] + action_name = self.get_action_name(req) + if not action_name: # not rate limited + return self.application + delay = self.get_delay(action_name, username) + if delay: + # TODO(gundlach): Get the retry-after format correct.
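+ # NOTE: per RFC 2616 the Retry-After header should carry delta-seconds + # or an HTTP-date; the absolute epoch float passed below is the format + # issue the TODO above refers to.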
+ exc = webob.exc.HTTPRequestEntityTooLarge( + explanation='Too many requests.', + headers={'Retry-After': time.time() + delay}) + raise faults.Fault(exc) + return self.application + + def get_delay(self, action_name, username): + """Return the delay for the given action and username, or None if + the action would not be rate limited. + """ + if action_name == 'POST servers': + # "POST servers" is a POST, so it counts against "POST" too. + # Attempt the "POST" first, lest we are rate limited by "POST" but + # use up a precious "POST servers" call. + delay = self.limiter.perform("POST", username=username) + if delay: + return delay + return self.limiter.perform(action_name, username=username) + + def get_action_name(self, req): + """Return the action name for this request.""" + if req.method == 'GET' and 'changes-since' in req.GET: + return 'GET changes-since' + if req.method == 'POST' and req.path_info.startswith('/servers'): + return 'POST servers' + if req.method in ['PUT', 'POST', 'DELETE']: + return req.method + return None + + +class APIRouter(wsgi.Router): + """ + Routes requests on the OpenStack API to the appropriate controller + and method. + """ + + def __init__(self): + mapper = routes.Mapper() + mapper.resource("server", "servers", controller=servers.Controller(), + collection={ 'detail': 'GET'}, + member={'action':'POST'}) + + mapper.resource("backup_schedule", "backup_schedules", + controller=backup_schedules.Controller(), + parent_resource=dict(member_name='server', + collection_name = 'servers')) + + mapper.resource("image", "images", controller=images.Controller(), + collection={'detail': 'GET'}) + mapper.resource("flavor", "flavors", controller=flavors.Controller(), + collection={'detail': 'GET'}) + mapper.resource("sharedipgroup", "sharedipgroups", + controller=sharedipgroups.Controller()) + + super(APIRouter, self).__init__(mapper) + + +def limited(items, req): + """Return a slice of items according to requested offset and limit. + + items - a sliceable + req - webob.Request possibly containing offset and limit GET variables. + offset is where to start in the list, and limit is the maximum number + of items to return. + + If limit is not specified, is 0, or is greater than 1000, it defaults to 1000.
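+ Example (hypothetical request): offset=20&limit=10 yields items[20:30].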
+ """ + offset = int(req.GET.get('offset', 0)) + limit = int(req.GET.get('limit', 0)) + if not limit: + limit = 1000 + limit = min(1000, limit) + range_end = offset + limit + return items[offset:range_end] + diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py new file mode 100644 index 000000000..7aba55728 --- /dev/null +++ b/nova/api/openstack/auth.py @@ -0,0 +1,101 @@ +import datetime +import hashlib +import json +import time + +import webob.exc +import webob.dec + +from nova import auth +from nova import db +from nova import flags +from nova import manager +from nova import utils +from nova.api.openstack import faults + +FLAGS = flags.FLAGS + +class Context(object): + pass + +class BasicApiAuthManager(object): + """ Implements a somewhat rudimentary version of OpenStack Auth""" + + def __init__(self, host=None, db_driver=None): + if not host: + host = FLAGS.host + self.host = host + if not db_driver: + db_driver = FLAGS.db_driver + self.db = utils.import_object(db_driver) + self.auth = auth.manager.AuthManager() + self.context = Context() + super(BasicApiAuthManager, self).__init__() + + def authenticate(self, req): + # Unless the request is explicitly made against /<version>/ don't + # honor it + path_info = req.path_info + if len(path_info) > 1: + return faults.Fault(webob.exc.HTTPUnauthorized()) + + try: + username = req.headers['X-Auth-User'] + key = req.headers['X-Auth-Key'] + except KeyError: + return faults.Fault(webob.exc.HTTPUnauthorized()) + + token, user = self._authorize_user(username, key) + if user and token: + res = webob.Response() + res.headers['X-Auth-Token'] = token.token_hash + res.headers['X-Server-Management-Url'] = \ + token.server_management_url + res.headers['X-Storage-Url'] = token.storage_url + res.headers['X-CDN-Management-Url'] = token.cdn_management_url + res.content_type = 'text/plain' + res.status = '204' + return res + else: + return faults.Fault(webob.exc.HTTPUnauthorized()) + + def authorize_token(self, token_hash): + """ retrieves user information from the datastore given a token + + If the token has expired, returns None + If the token is not found, returns None + Otherwise returns dict(id=(the authorized user's id)) + + This method will also remove the token if the timestamp is older than + 2 days ago. + """ + token = self.db.auth_get_token(self.context, token_hash) + if token: + delta = datetime.datetime.now() - token.created_at + if delta.days >= 2: + self.db.auth_destroy_token(self.context, token) + else: + #TODO(gundlach): Why not just return dict(id=token.user_id)? 
+ user = self.auth.get_user(token.user_id) + return {'id': user.id} + return None + + def _authorize_user(self, username, key): + """ Generates a new token and assigns it to a user """ + user = self.auth.get_user_from_access_key(key) + if user and user.name == username: + token_hash = hashlib.sha1('%s%s%f' % (username, key, + time.time())).hexdigest() + token_dict = {} + token_dict['token_hash'] = token_hash + token_dict['cdn_management_url'] = '' + token_dict['server_management_url'] = self._get_server_mgmt_url() + token_dict['storage_url'] = '' + token_dict['user_id'] = user.id + token = self.db.auth_create_token(self.context, token_dict) + return token, user + return None, None + + def _get_server_mgmt_url(self): + return 'https://%s/v1.0/' % self.host + diff --git a/nova/api/rackspace/images.py b/nova/api/openstack/backup_schedules.py index 986f11434..76ad6ef87 100644 --- a/nova/api/rackspace/images.py +++ b/nova/api/openstack/backup_schedules.py @@ -15,4 +15,24 @@ # License for the specific language governing permissions and limitations # under the License. -class Controller(object): pass +import time +from webob import exc + +from nova import wsgi +from nova.api.openstack import faults +import nova.image.service + +class Controller(wsgi.Controller): + def __init__(self): + pass + + def index(self, req, server_id): + return faults.Fault(exc.HTTPNotFound()) + + def create(self, req, server_id): + """ No actual update method required, since the existing API allows + both create and update through a POST """ + return faults.Fault(exc.HTTPNotFound()) + + def delete(self, req, server_id): + return faults.Fault(exc.HTTPNotFound()) diff --git a/nova/api/rackspace/base.py b/nova/api/openstack/base.py index dd2c6543c..dd2c6543c 100644 --- a/nova/api/rackspace/base.py +++ b/nova/api/openstack/base.py diff --git a/nova/api/rackspace/flavors.py b/nova/api/openstack/context.py index 986f11434..77394615b 100644 --- a/nova/api/rackspace/flavors.py +++ b/nova/api/openstack/context.py @@ -15,4 +15,19 @@ # License for the specific language governing permissions and limitations # under the License. -class Controller(object): pass +""" +APIRequestContext +""" + +import random + +class Project(object): + def __init__(self, user_id): + self.id = user_id + +class APIRequestContext(object): + """ This is an adapter class to get around all of the assumptions made in + the FlatNetworking """ + def __init__(self, user_id): + self.user_id = user_id + self.project = Project(user_id) diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py new file mode 100644 index 000000000..32e5c866f --- /dev/null +++ b/nova/api/openstack/faults.py @@ -0,0 +1,62 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import webob.dec +import webob.exc + +from nova import wsgi + + +class Fault(webob.exc.HTTPException): + + """An RS API fault response.""" + + _fault_names = { + 400: "badRequest", + 401: "unauthorized", + 403: "resizeNotAllowed", + 404: "itemNotFound", + 405: "badMethod", + 409: "inProgress", + 413: "overLimit", + 415: "badMediaType", + 501: "notImplemented", + 503: "serviceUnavailable"} + + def __init__(self, exception): + """Create a Fault for the given webob.exc.exception.""" + self.wrapped_exc = exception + + @webob.dec.wsgify + def __call__(self, req): + """Generate a WSGI response based on the exception passed to ctor.""" + # Replace the body with fault details. + code = self.wrapped_exc.status_int + fault_name = self._fault_names.get(code, "cloudServersFault") + fault_data = { + fault_name: { + 'code': code, + 'message': self.wrapped_exc.explanation}} + if code == 413: + retry = self.wrapped_exc.headers['Retry-After'] + fault_data[fault_name]['retryAfter'] = retry + # 'code' is an attribute on the fault tag itself + metadata = {'application/xml': {'attributes': {fault_name: 'code'}}} + serializer = wsgi.Serializer(req.environ, metadata) + self.wrapped_exc.body = serializer.to_content_type(fault_data) + return self.wrapped_exc diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py new file mode 100644 index 000000000..793984a5d --- /dev/null +++ b/nova/api/openstack/flavors.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from webob import exc + +from nova.api.openstack import faults +from nova.compute import instance_types +from nova import wsgi +import nova.api.openstack + +class Controller(wsgi.Controller): + """Flavor controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "flavor": [ "id", "name", "ram", "disk" ] + } + } + } + + def index(self, req): + """Return all flavors in brief.""" + return dict(flavors=[dict(id=flavor['id'], name=flavor['name']) + for flavor in self.detail(req)['flavors']]) + + def detail(self, req): + """Return all flavors in detail.""" + items = [self.show(req, id)['flavor'] for id in self._all_ids()] + items = nova.api.openstack.limited(items, req) + return dict(flavors=items) + + def show(self, req, id): + """Return data about the given flavor id.""" + for name, val in instance_types.INSTANCE_TYPES.iteritems(): + if val['flavorid'] == int(id): + item = dict(ram=val['memory_mb'], disk=val['local_gb'], + id=val['flavorid'], name=name) + return dict(flavor=item) + raise faults.Fault(exc.HTTPNotFound()) + + def _all_ids(self): + """Return the list of all flavorids.""" + return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()] diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py new file mode 100644 index 000000000..aa438739c --- /dev/null +++ b/nova/api/openstack/images.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from webob import exc + +from nova import flags +from nova import utils +from nova import wsgi +import nova.api.openstack +import nova.image.service +from nova.api.openstack import faults + + +FLAGS = flags.FLAGS + +class Controller(wsgi.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "image": [ "id", "name", "updated", "created", "status", + "serverId", "progress" ] + } + } + } + + def __init__(self): + self._service = utils.import_object(FLAGS.image_service) + + def index(self, req): + """Return all public images in brief.""" + return dict(images=[dict(id=img['id'], name=img['name']) + for img in self.detail(req)['images']]) + + def detail(self, req): + """Return all public images in detail.""" + data = self._service.index() + data = nova.api.openstack.limited(data, req) + return dict(images=data) + + def show(self, req, id): + """Return data about the given image id.""" + return dict(image=self._service.show(id)) + + def delete(self, req, id): + # Only public images are supported for now. + raise faults.Fault(exc.HTTPNotFound()) + + def create(self, req): + # Only public images are supported for now, so a request to + # make a backup of a server cannot be supported. + raise faults.Fault(exc.HTTPNotFound()) + + def update(self, req, id): + # Users may not modify public images, and that's all that + # we support for now.
+ raise faults.Fault(exc.HTTPNotFound()) diff --git a/nova/api/openstack/notes.txt b/nova/api/openstack/notes.txt new file mode 100644 index 000000000..2330f1002 --- /dev/null +++ b/nova/api/openstack/notes.txt @@ -0,0 +1,23 @@ +We will need: + +ImageService +a service that can do crud on image information. not user-specific. opaque +image ids. + +GlanceImageService(ImageService): +image ids are URIs. + +LocalImageService(ImageService): +image ids are random strings. + +OpenstackAPITranslationStore: +translates RS server/images/flavor/etc ids into formats required +by a given ImageService strategy. + +api.openstack.images.Controller: +uses an ImageService strategy behind the scenes to do its fetching; it just +converts int image id into a strategy-specific image id. + +who maintains the mapping from user to [images he owns]? nobody, because +we have no way of enforcing access to his images, without kryptex which +won't be in Austin. diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py new file mode 100644 index 000000000..f843bac0f --- /dev/null +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -0,0 +1,122 @@ +"""Rate limiting of arbitrary actions.""" + +import httplib +import time +import urllib +import webob.dec +import webob.exc + + +# Convenience constants for the limits dictionary passed to Limiter(). +PER_SECOND = 1 +PER_MINUTE = 60 +PER_HOUR = 60 * 60 +PER_DAY = 60 * 60 * 24 + +class Limiter(object): + + """Class providing rate limiting of arbitrary actions.""" + + def __init__(self, limits): + """Create a rate limiter. + + limits: a dict mapping from action name to a tuple. The tuple contains + the number of times the action may be performed, and the time period + (in seconds) during which the number must not be exceeded for this + action. Example: dict(reboot=(10, ratelimiting.PER_MINUTE)) would + allow 10 'reboot' actions per minute. + """ + self.limits = limits + self._levels = {} + + def perform(self, action_name, username='nobody'): + """Attempt to perform an action by the given username. + + action_name: the string name of the action to perform. This must + be a key in the limits dict passed to the ctor. + + username: an optional string name of the user performing the action. + Each user has her own set of rate limiting counters. Defaults to + 'nobody' (so that if you never specify a username when calling + perform(), a single set of counters will be used.) + + Return None if the action may proceed. If the action may not proceed + because it has been rate limited, return the float number of seconds + until the action would succeed. + """ + # Think of rate limiting as a bucket leaking water at 1cc/second. The + # bucket can hold as many ccs as there are seconds in the rate + # limiting period (e.g. 3600 for per-hour ratelimits), and if you can + # perform N actions in that time, each action fills the bucket by + # 1/Nth of its volume. You may only perform an action if the bucket + # would not overflow. + now = time.time() + key = '%s:%s' % (username, action_name) + last_time_performed, water_level = self._levels.get(key, (now, 0)) + # The bucket leaks 1cc/second. + water_level -= (now - last_time_performed) + if water_level < 0: + water_level = 0 + num_allowed_per_period, period_in_secs = self.limits[action_name] + # Fill the bucket by 1/Nth its capacity, and hope it doesn't overflow. 
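+ # For example, with a limit of (10, PER_MINUTE) the bucket holds 60 + # "ccs" and each action adds 6, so ten rapid actions fill it and the + # eleventh is delayed about six seconds.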
+ capacity = period_in_secs + new_level = water_level + (capacity * 1.0 / num_allowed_per_period) + if new_level > capacity: + # Delay this many seconds. + return new_level - capacity + self._levels[key] = (now, new_level) + return None + + +# If one instance of this WSGIApps is unable to handle your load, put a +# sharding app in front that shards by username to one of many backends. + +class WSGIApp(object): + + """Application that tracks rate limits in memory. Send requests to it of + this form: + + POST /limiter/<username>/<urlencoded action> + + and receive a 200 OK, or a 403 Forbidden with an X-Wait-Seconds header + containing the number of seconds to wait before the action would succeed. + """ + + def __init__(self, limiter): + """Create the WSGI application using the given Limiter instance.""" + self.limiter = limiter + + @webob.dec.wsgify + def __call__(self, req): + parts = req.path_info.split('/') + # format: /limiter/<username>/<urlencoded action> + if req.method != 'POST': + raise webob.exc.HTTPMethodNotAllowed() + if len(parts) != 4 or parts[1] != 'limiter': + raise webob.exc.HTTPNotFound() + username = parts[2] + action_name = urllib.unquote(parts[3]) + delay = self.limiter.perform(action_name, username) + if delay: + return webob.exc.HTTPForbidden( + headers={'X-Wait-Seconds': "%.2f" % delay}) + else: + return '' # 200 OK + + +class WSGIAppProxy(object): + + """Limiter lookalike that proxies to a ratelimiting.WSGIApp.""" + + def __init__(self, service_host): + """Creates a proxy pointing to a ratelimiting.WSGIApp at the given + host.""" + self.service_host = service_host + + def perform(self, action, username='nobody'): + conn = httplib.HTTPConnection(self.service_host) + conn.request('POST', '/limiter/%s/%s' % (username, action)) + resp = conn.getresponse() + if resp.status == 200: + return None # no delay + return float(resp.getheader('X-Wait-Seconds')) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py new file mode 100644 index 000000000..cb5132635 --- /dev/null +++ b/nova/api/openstack/servers.py @@ -0,0 +1,273 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time + +import webob +from webob import exc + +from nova import flags +from nova import rpc +from nova import utils +from nova import wsgi +from nova.api import cloud +from nova.api.openstack import context +from nova.api.openstack import faults +from nova.compute import instance_types +from nova.compute import power_state +import nova.api.openstack +import nova.image.service + +FLAGS = flags.FLAGS + +def _filter_params(inst_dict): + """ Extracts all updatable parameters for a server update request """ + keys = dict(name='name', admin_pass='adminPass') + new_attrs = {} + for k, v in keys.items(): + if inst_dict.has_key(v): + new_attrs[k] = inst_dict[v] + return new_attrs + +def _entity_list(entities): + """ Coerces a list of servers into proper dictionary format """ + return dict(servers=entities) + +def _entity_detail(inst): + """ Maps everything to Rackspace-like attributes for return""" + power_mapping = { + power_state.NOSTATE: 'build', + power_state.RUNNING: 'active', + power_state.BLOCKED: 'active', + power_state.PAUSED: 'suspended', + power_state.SHUTDOWN: 'active', + power_state.SHUTOFF: 'active', + power_state.CRASHED: 'error' + } + inst_dict = {} + + mapped_keys = dict(status='state', imageId='image_id', + flavorId='instance_type', name='server_name', id='id') + + for k, v in mapped_keys.iteritems(): + inst_dict[k] = inst[v] + + inst_dict['status'] = power_mapping[inst_dict['status']] + inst_dict['addresses'] = dict(public=[], private=[]) + inst_dict['metadata'] = {} + inst_dict['hostId'] = '' + + return dict(server=inst_dict) + +def _entity_inst(inst): + """ Filters all model attributes save for id and name """ + return dict(server=dict(id=inst['id'], name=inst['server_name'])) + +class Controller(wsgi.Controller): + """ The Server API controller for the OpenStack API """ + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "server": [ "id", "imageId", "name", "flavorId", "hostId", + "status", "progress", "progress" ] + } + } + } + + def __init__(self, db_driver=None): + if not db_driver: + db_driver = FLAGS.db_driver + self.db_driver = utils.import_object(db_driver) + super(Controller, self).__init__() + + def index(self, req): + """ Returns a list of server names and ids for a given user """ + return self._items(req, entity_maker=_entity_inst) + + def detail(self, req): + """ Returns a list of server details for a given user """ + return self._items(req, entity_maker=_entity_detail) + + def _items(self, req, entity_maker): + """Returns a list of servers for a given user. 
+ + entity_maker - either _entity_detail or _entity_inst + """ + user_id = req.environ['nova.context']['user']['id'] + instance_list = self.db_driver.instance_get_all_by_user(None, user_id) + limited_list = nova.api.openstack.limited(instance_list, req) + res = [entity_maker(inst)['server'] for inst in limited_list] + return _entity_list(res) + + def show(self, req, id): + """ Returns server details by server id """ + user_id = req.environ['nova.context']['user']['id'] + inst = self.db_driver.instance_get_by_internal_id(None, int(id)) + if inst: + if inst.user_id == user_id: + return _entity_detail(inst) + raise faults.Fault(exc.HTTPNotFound()) + + def delete(self, req, id): + """ Destroys a server """ + user_id = req.environ['nova.context']['user']['id'] + instance = self.db_driver.instance_get_by_internal_id(None, int(id)) + if instance and instance['user_id'] == user_id: + self.db_driver.instance_destroy(None, id) + return faults.Fault(exc.HTTPAccepted()) + return faults.Fault(exc.HTTPNotFound()) + + def create(self, req): + """ Creates a new server for a given user """ + + env = self._deserialize(req.body, req) + if not env: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + #try: + inst = self._build_server_instance(req, env) + #except Exception, e: + # return faults.Fault(exc.HTTPUnprocessableEntity()) + + rpc.cast( + FLAGS.compute_topic, { + "method": "run_instance", + "args": {"instance_id": inst['id']}}) + return _entity_inst(inst) + + def update(self, req, id): + """ Updates the server name or password """ + user_id = req.environ['nova.context']['user']['id'] + + inst_dict = self._deserialize(req.body, req) + + if not inst_dict: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + instance = self.db_driver.instance_get_by_internal_id(None, int(id)) + if not instance or instance.user_id != user_id: + return faults.Fault(exc.HTTPNotFound()) + + self.db_driver.instance_update(None, int(id), + _filter_params(inst_dict['server'])) + return faults.Fault(exc.HTTPNoContent()) + + def action(self, req, id): + """ multi-purpose method used to reboot, rebuild, and + resize a server """ + user_id = req.environ['nova.context']['user']['id'] + input_dict = self._deserialize(req.body, req) + try: + reboot_type = input_dict['reboot']['type'] + except Exception: + raise faults.Fault(webob.exc.HTTPNotImplemented()) + inst_ref = self.db_driver.instance_get_by_internal_id(None, int(id)) + if not inst_ref or (inst_ref and not inst_ref.user_id == user_id): + return faults.Fault(exc.HTTPUnprocessableEntity()) + cloud.reboot(id) + + def _build_server_instance(self, req, env): + """Build instance data structure and save it to the data store.""" + ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) + inst = {} + + user_id = req.environ['nova.context']['user']['id'] + + flavor_id = env['server']['flavorId'] + + instance_type, flavor = [(k, v) for k, v in + instance_types.INSTANCE_TYPES.iteritems() + if v['flavorid'] == flavor_id][0] + + image_id = env['server']['imageId'] + img_service = utils.import_object(FLAGS.image_service) + + image = img_service.show(image_id) + + if not image: + raise Exception, "Image not found" + + inst['server_name'] = env['server']['name'] + inst['image_id'] = image_id + inst['user_id'] = user_id + inst['launch_time'] = ltime + inst['mac_address'] = utils.generate_mac() + inst['project_id'] = user_id + + inst['state_description'] = 'scheduling' + inst['kernel_id'] = image.get('kernelId', FLAGS.default_kernel) + inst['ramdisk_id'] = image.get('ramdiskId', 
FLAGS.default_ramdisk) + inst['reservation_id'] = utils.generate_uid('r') + + inst['display_name'] = env['server']['name'] + inst['display_description'] = env['server']['name'] + + #TODO(dietz) this may be ill advised + key_pair_ref = self.db_driver.key_pair_get_all_by_user( + None, user_id)[0] + + inst['key_data'] = key_pair_ref['public_key'] + inst['key_name'] = key_pair_ref['name'] + + #TODO(dietz) stolen from ec2 api, see TODO there + inst['security_group'] = 'default' + + # Flavor related attributes + inst['instance_type'] = instance_type + inst['memory_mb'] = flavor['memory_mb'] + inst['vcpus'] = flavor['vcpus'] + inst['local_gb'] = flavor['local_gb'] + + ref = self.db_driver.instance_create(None, inst) + inst['id'] = ref.internal_id + # TODO(dietz): this isn't explicitly necessary, but the networking + # calls depend on an object with a project_id property, and therefore + # should be cleaned up later + api_context = context.APIRequestContext(user_id) + + inst['mac_address'] = utils.generate_mac() + + #TODO(dietz) is this necessary? + inst['launch_index'] = 0 + + inst['hostname'] = str(ref.internal_id) + self.db_driver.instance_update(api_context, inst['id'], inst) + + network_manager = utils.import_object(FLAGS.network_manager) + address = network_manager.allocate_fixed_ip(api_context, + inst['id']) + + # TODO(vish): This probably should be done in the scheduler + # network is setup when host is assigned + network_topic = self._get_network_topic(api_context, network_manager) + rpc.call(network_topic, + {"method": "setup_fixed_ip", + "args": {"context": api_context, + "address": address}}) + return inst + + def _get_network_topic(self, context, network_manager): + """Retrieves the network host for a project""" + network_ref = network_manager.get_network(context) + host = network_ref['host'] + if not host: + host = rpc.call(FLAGS.network_topic, + {"method": "set_network_host", + "args": {"context": context, + "network_id": network_ref['id']}}) + return self.db_driver.queue_get_for(None, FLAGS.network_topic, host) diff --git a/nova/api/rackspace/sharedipgroups.py b/nova/api/openstack/sharedipgroups.py index 986f11434..4d2d0ede1 100644 --- a/nova/api/rackspace/sharedipgroups.py +++ b/nova/api/openstack/sharedipgroups.py @@ -15,4 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -class Controller(object): pass +from nova import wsgi + +class Controller(wsgi.Controller): pass diff --git a/nova/api/rackspace/__init__.py b/nova/api/rackspace/__init__.py deleted file mode 100644 index 27e78f801..000000000 --- a/nova/api/rackspace/__init__.py +++ /dev/null @@ -1,81 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -WSGI middleware for Rackspace API controllers. 
-""" - -import json -import time - -import routes -import webob.dec -import webob.exc - -from nova import flags -from nova import wsgi -from nova.api.rackspace import flavors -from nova.api.rackspace import images -from nova.api.rackspace import servers -from nova.api.rackspace import sharedipgroups -from nova.auth import manager - - -class API(wsgi.Middleware): - """WSGI entry point for all Rackspace API requests.""" - - def __init__(self): - app = AuthMiddleware(APIRouter()) - super(API, self).__init__(app) - - -class AuthMiddleware(wsgi.Middleware): - """Authorize the rackspace API request or return an HTTP Forbidden.""" - - #TODO(gundlach): isn't this the old Nova API's auth? Should it be replaced - #with correct RS API auth? - - @webob.dec.wsgify - def __call__(self, req): - context = {} - if "HTTP_X_AUTH_TOKEN" in req.environ: - context['user'] = manager.AuthManager().get_user_from_access_key( - req.environ['HTTP_X_AUTH_TOKEN']) - if context['user']: - context['project'] = manager.AuthManager().get_project( - context['user'].name) - if "user" not in context: - return webob.exc.HTTPForbidden() - req.environ['nova.context'] = context - return self.application - - -class APIRouter(wsgi.Router): - """ - Routes requests on the Rackspace API to the appropriate controller - and method. - """ - - def __init__(self): - mapper = routes.Mapper() - mapper.resource("server", "servers", controller=servers.Controller()) - mapper.resource("image", "images", controller=images.Controller()) - mapper.resource("flavor", "flavors", controller=flavors.Controller()) - mapper.resource("sharedipgroup", "sharedipgroups", - controller=sharedipgroups.Controller()) - super(APIRouter, self).__init__(mapper) diff --git a/nova/api/rackspace/servers.py b/nova/api/rackspace/servers.py deleted file mode 100644 index 25d1fe9c8..000000000 --- a/nova/api/rackspace/servers.py +++ /dev/null @@ -1,83 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from nova import rpc -from nova.compute import model as compute -from nova.api.rackspace import base - - -class Controller(base.Controller): - entity_name = 'servers' - - def index(self, **kwargs): - instances = [] - for inst in compute.InstanceDirectory().all: - instances.append(instance_details(inst)) - - def show(self, **kwargs): - instance_id = kwargs['id'] - return compute.InstanceDirectory().get(instance_id) - - def delete(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.destroy() - return True - - def create(self, **kwargs): - inst = self.build_server_instance(kwargs['server']) - rpc.cast( - FLAGS.compute_topic, { - "method": "run_instance", - "args": {"instance_id": inst.instance_id}}) - - def update(self, **kwargs): - instance_id = kwargs['id'] - instance = compute.InstanceDirectory().get(instance_id) - if not instance: - raise ServerNotFound("The requested server was not found") - instance.update(kwargs['server']) - instance.save() - - def build_server_instance(self, env): - """Build instance data structure and save it to the data store.""" - reservation = utils.generate_uid('r') - ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()) - inst = self.instdir.new() - inst['name'] = env['server']['name'] - inst['image_id'] = env['server']['imageId'] - inst['instance_type'] = env['server']['flavorId'] - inst['user_id'] = env['user']['id'] - inst['project_id'] = env['project']['id'] - inst['reservation_id'] = reservation - inst['launch_time'] = ltime - inst['mac_address'] = utils.generate_mac() - address = self.network.allocate_ip( - inst['user_id'], - inst['project_id'], - mac=inst['mac_address']) - inst['private_dns_name'] = str(address) - inst['bridge_name'] = network.BridgedNetwork.get_network_for_project( - inst['user_id'], - inst['project_id'], - 'default')['bridge_name'] - # key_data, key_name, ami_launch_index - # TODO(todd): key data or root password - inst.save() - return inst |
