| field | value | date |
|---|---|---|
| author | Vishvananda Ishaya <vishvananda@gmail.com> | 2011-01-18 18:29:56 -0800 |
| committer | Vishvananda Ishaya <vishvananda@gmail.com> | 2011-01-18 18:29:56 -0800 |
| commit | eb33a6b78b8d802c3f92a80e5d5e4a60aef5bf68 (patch) | |
| tree | be28644743097fd8228134f31be9f3dc683db7c8 /nova | |
| parent | 324d8fdf284bd5109e34692049256722d731b572 (diff) | |
| parent | 4eed55b46cfaba58b5d344f0ca96eba090d8bd34 (diff) | |
| download | nova-eb33a6b78b8d802c3f92a80e5d5e4a60aef5bf68.tar.gz nova-eb33a6b78b8d802c3f92a80e5d5e4a60aef5bf68.tar.xz nova-eb33a6b78b8d802c3f92a80e5d5e4a60aef5bf68.zip | |
merged trunk
Diffstat (limited to 'nova')
115 files changed, 7457 insertions, 1975 deletions
diff --git a/nova/api/__init__.py b/nova/api/__init__.py
index 26fed847b..0fedbbfad 100644
--- a/nova/api/__init__.py
+++ b/nova/api/__init__.py
@@ -15,97 +15,5 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-"""
-Root WSGI middleware for all API controllers.
-
-**Related Flags**
-
-:osapi_subdomain: subdomain running the OpenStack API (default: api)
-:ec2api_subdomain: subdomain running the EC2 API (default: ec2)
-
-"""
-import logging
-
-import routes
-import webob.dec
-
-from nova import flags
-from nova import wsgi
-from nova.api import ec2
-from nova.api import openstack
-from nova.api.ec2 import metadatarequesthandler
-
-
-flags.DEFINE_string('osapi_subdomain', 'api',
-                    'subdomain running the OpenStack API')
-flags.DEFINE_string('ec2api_subdomain', 'ec2',
-                    'subdomain running the EC2 API')
-
-FLAGS = flags.FLAGS
-
-
-class API(wsgi.Router):
-    """Routes top-level requests to the appropriate controller."""
-
-    def __init__(self, default_api):
-        osapi_subdomain = {'sub_domain': [FLAGS.osapi_subdomain]}
-        ec2api_subdomain = {'sub_domain': [FLAGS.ec2api_subdomain]}
-        if default_api == 'os':
-            osapi_subdomain = {}
-        elif default_api == 'ec2':
-            ec2api_subdomain = {}
-        mapper = routes.Mapper()
-        mapper.sub_domains = True
-
-        mapper.connect("/", controller=self.osapi_versions,
-                       conditions=osapi_subdomain)
-        mapper.connect("/v1.0/{path_info:.*}", controller=openstack.API(),
-                       conditions=osapi_subdomain)
-
-        mapper.connect("/", controller=self.ec2api_versions,
-                       conditions=ec2api_subdomain)
-        mapper.connect("/services/{path_info:.*}", controller=ec2.API(),
-                       conditions=ec2api_subdomain)
-        mrh = metadatarequesthandler.MetadataRequestHandler()
-        for s in ['/latest',
-                  '/2009-04-04',
-                  '/2008-09-01',
-                  '/2008-02-01',
-                  '/2007-12-15',
-                  '/2007-10-10',
-                  '/2007-08-29',
-                  '/2007-03-01',
-                  '/2007-01-19',
-                  '/1.0']:
-            mapper.connect('%s/{path_info:.*}' % s, controller=mrh,
-                           conditions=ec2api_subdomain)
-
-        super(API, self).__init__(mapper)
-
-    @webob.dec.wsgify
-    def osapi_versions(self, req):
-        """Respond to a request for all OpenStack API versions."""
-        response = {
-                "versions": [
-                    dict(status="CURRENT", id="v1.0")]}
-        metadata = {
-            "application/xml": {
-                "attributes": dict(version=["status", "id"])}}
-        return wsgi.Serializer(req.environ, metadata).to_content_type(response)
-
-    @webob.dec.wsgify
-    def ec2api_versions(self, req):
-        """Respond to a request for all EC2 versions."""
-        # available api versions
-        versions = [
-            '1.0',
-            '2007-01-19',
-            '2007-03-01',
-            '2007-08-29',
-            '2007-10-10',
-            '2007-12-15',
-            '2008-02-01',
-            '2008-09-01',
-            '2009-04-04',
-        ]
-        return ''.join('%s\n' % v for v in versions)
+"""No-op __init__ for directory full of api goodies."""
diff --git a/nova/api/direct.py b/nova/api/direct.py
new file mode 100644
index 000000000..81b3ae202
--- /dev/null
+++ b/nova/api/direct.py
@@ -0,0 +1,232 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Public HTTP interface that allows services to self-register.
+
+The general flow of a request is:
+    - Request is parsed into WSGI bits.
+    - Some middleware checks authentication.
+    - Routing takes place based on the URL to find a controller.
+      (/controller/method)
+    - Parameters are parsed from the request and passed to a method on the
+      controller as keyword arguments.
+      - Optionally 'json' is decoded to provide all the parameters.
+    - Actual work is done and a result is returned.
+    - That result is turned into json and returned.
+
+"""
+
+import inspect
+import urllib
+
+import routes
+import webob
+
+from nova import context
+from nova import flags
+from nova import utils
+from nova import wsgi
+
+
+ROUTES = {}
+
+
+def register_service(path, handle):
+    ROUTES[path] = handle
+
+
+class Router(wsgi.Router):
+    def __init__(self, mapper=None):
+        if mapper is None:
+            mapper = routes.Mapper()
+
+        self._load_registered_routes(mapper)
+        super(Router, self).__init__(mapper=mapper)
+
+    def _load_registered_routes(self, mapper):
+        for route in ROUTES:
+            mapper.connect('/%s/{action}' % route,
+                           controller=ServiceWrapper(ROUTES[route]))
+
+
+class DelegatedAuthMiddleware(wsgi.Middleware):
+    def process_request(self, request):
+        os_user = request.headers['X-OpenStack-User']
+        os_project = request.headers['X-OpenStack-Project']
+        context_ref = context.RequestContext(user=os_user, project=os_project)
+        request.environ['openstack.context'] = context_ref
+
+
+class JsonParamsMiddleware(wsgi.Middleware):
+    def process_request(self, request):
+        if 'json' not in request.params:
+            return
+
+        params_json = request.params['json']
+        params_parsed = utils.loads(params_json)
+        params = {}
+        for k, v in params_parsed.iteritems():
+            if k in ('self', 'context'):
+                continue
+            if k.startswith('_'):
+                continue
+            params[k] = v
+
+        request.environ['openstack.params'] = params
+
+
+class PostParamsMiddleware(wsgi.Middleware):
+    def process_request(self, request):
+        params_parsed = request.params
+        params = {}
+        for k, v in params_parsed.iteritems():
+            if k in ('self', 'context'):
+                continue
+            if k.startswith('_'):
+                continue
+            params[k] = v
+
+        request.environ['openstack.params'] = params
+
+
+class Reflection(object):
+    """Reflection methods to list available methods."""
+    def __init__(self):
+        self._methods = {}
+        self._controllers = {}
+
+    def _gather_methods(self):
+        methods = {}
+        controllers = {}
+        for route, handler in ROUTES.iteritems():
+            controllers[route] = handler.__doc__.split('\n')[0]
+            for k in dir(handler):
+                if k.startswith('_'):
+                    continue
+                f = getattr(handler, k)
+                if not callable(f):
+                    continue
+
+                # bunch of ugly formatting stuff
+                argspec = inspect.getargspec(f)
+                args = [x for x in argspec[0]
+                        if x != 'self' and x != 'context']
+                defaults = argspec[3] and argspec[3] or []
+                args_r = list(reversed(args))
+                defaults_r = list(reversed(defaults))
+
+                args_out = []
+                while args_r:
+                    if defaults_r:
+                        args_out.append((args_r.pop(0),
+                                         repr(defaults_r.pop(0))))
+                    else:
+                        args_out.append((str(args_r.pop(0)),))
+
+                # if the method accepts keywords
+                if argspec[2]:
+                    args_out.insert(0, ('**%s' % argspec[2],))
+
+                methods['/%s/%s' % (route, k)] = {
+                        'short_doc': f.__doc__.split('\n')[0],
+                        'doc': f.__doc__,
+                        'name': k,
+                        'args': list(reversed(args_out))}
+
+        self._methods = methods
+        self._controllers = controllers
+
+    def get_controllers(self, context):
+        """List available controllers."""
+        if not self._controllers:
+            self._gather_methods()
+
+        return self._controllers
+
+    def get_methods(self, context):
+        """List available methods."""
+        if not self._methods:
+            self._gather_methods()
+
+        method_list = self._methods.keys()
+        method_list.sort()
+        methods = {}
+        for k in method_list:
+            methods[k] = self._methods[k]['short_doc']
+        return methods
+
+    def get_method_info(self, context, method):
+        """Get detailed information about a method."""
+        if not self._methods:
+            self._gather_methods()
+        return self._methods[method]
+
+
+class ServiceWrapper(wsgi.Controller):
+    def __init__(self, service_handle):
+        self.service_handle = service_handle
+
+    @webob.dec.wsgify
+    def __call__(self, req):
+        arg_dict = req.environ['wsgiorg.routing_args'][1]
+        action = arg_dict['action']
+        del arg_dict['action']
+
+        context = req.environ['openstack.context']
+        # allow middleware up the stack to override the params
+        params = {}
+        if 'openstack.params' in req.environ:
+            params = req.environ['openstack.params']
+
+        # TODO(termie): do some basic normalization on methods
+        method = getattr(self.service_handle, action)
+
+        result = method(context, **params)
+        if type(result) is dict or type(result) is list:
+            return self._serialize(result, req)
+        else:
+            return result
+
+
+class Proxy(object):
+    """Pretend a Direct API endpoint is an object."""
+    def __init__(self, app, prefix=None):
+        self.app = app
+        self.prefix = prefix
+
+    def __do_request(self, path, context, **kwargs):
+        req = webob.Request.blank(path)
+        req.method = 'POST'
+        req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
+        req.environ['openstack.context'] = context
+        resp = req.get_response(self.app)
+        try:
+            return utils.loads(resp.body)
+        except Exception:
+            return resp.body
+
+    def __getattr__(self, key):
+        if self.prefix is None:
+            return self.__class__(self.app, prefix=key)
+
+        def _wrapper(context, **kwargs):
+            return self.__do_request('/%s/%s' % (self.prefix, key),
+                                     context,
+                                     **kwargs)
+        _wrapper.func_name = key
+        return _wrapper
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index aa3bfaeb4..238cb0f38 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -20,7 +20,7 @@
 Starting point for routing EC2 requests.
 
 """
-import logging
+import datetime
 
 import routes
 import webob
 import webob.dec
@@ -29,19 +29,18 @@ import webob.exc
 from nova import context
 from nova import exception
 from nova import flags
+from nova import log as logging
+from nova import utils
 from nova import wsgi
 from nova.api.ec2 import apirequest
-from nova.api.ec2 import admin
-from nova.api.ec2 import cloud
 from nova.auth import manager
 
 
 FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.api")
 
 flags.DEFINE_boolean('use_forwarded_for', False,
                      'Treat X-Forwarded-For as the canonical remote address. '
                      'Only enable this if you have a sanitizing proxy.')
-flags.DEFINE_boolean('use_lockout', False,
-                     'Whether or not to use lockout middleware.')
 flags.DEFINE_integer('lockout_attempts', 5,
                      'Number of failed auths before lockout.')
 flags.DEFINE_integer('lockout_minutes', 15,
@@ -52,17 +51,42 @@ flags.DEFINE_list('lockout_memcached_servers', None,
                   'Memcached servers or None for in process cache.')
 
 
-_log = logging.getLogger("api")
-_log.setLevel(logging.DEBUG)
+class RequestLogging(wsgi.Middleware):
+    """Access-Log akin logging for all EC2 API requests."""
 
-
-class API(wsgi.Middleware):
-    """Routing for all EC2 API requests."""
-
-    def __init__(self):
-        self.application = Authenticate(Router(Authorizer(Executor())))
-        if FLAGS.use_lockout:
-            self.application = Lockout(self.application)
+    @webob.dec.wsgify
+    def __call__(self, req):
+        rv = req.get_response(self.application)
+        self.log_request_completion(rv, req)
+        return rv
+
+    def log_request_completion(self, response, request):
+        controller = request.environ.get('ec2.controller', None)
+        if controller:
+            controller = controller.__class__.__name__
+        action = request.environ.get('ec2.action', None)
+        ctxt = request.environ.get('ec2.context', None)
+        seconds = 'X'
+        microseconds = 'X'
+        if ctxt:
+            delta = datetime.datetime.utcnow() - \
+                    ctxt.timestamp
+            seconds = delta.seconds
+            microseconds = delta.microseconds
+        LOG.info(
+            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
+            seconds,
+            microseconds,
+            request.remote_addr,
+            request.method,
+            request.path_info,
+            controller,
+            action,
+            response.status_int,
+            request.user_agent,
+            request.content_type,
+            response.content_type,
+            context=ctxt)
 
 
 class Lockout(wsgi.Middleware):
@@ -98,7 +122,7 @@ class Lockout(wsgi.Middleware):
         failures_key = "authfailures-%s" % access_key
         failures = int(self.mc.get(failures_key) or 0)
         if failures >= FLAGS.lockout_attempts:
-            detail = "Too many failed authentications."
+            detail = _("Too many failed authentications.")
             raise webob.exc.HTTPForbidden(detail=detail)
         res = req.get_response(self.application)
         if res.status_int == 403:
@@ -107,9 +131,9 @@ class Lockout(wsgi.Middleware):
                 # NOTE(vish): To use incr, failures has to be a string.
                 self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
             elif failures >= FLAGS.lockout_attempts:
-                _log.warn('Access key %s has had %d failed authentications'
-                          ' and will be locked out for %d minutes.' %
-                          (access_key, failures, FLAGS.lockout_minutes))
+                LOG.warn(_('Access key %s has had %d failed authentications'
+                           ' and will be locked out for %d minutes.'),
+                         access_key, failures, FLAGS.lockout_minutes)
                 self.mc.set(failures_key, str(failures),
                             time=FLAGS.lockout_minutes * 60)
         return res
@@ -142,8 +166,9 @@ class Authenticate(wsgi.Middleware):
                 req.method,
                 req.host,
                 req.path)
-        except exception.Error, ex:
-            logging.debug(_("Authentication Failure: %s") % ex)
+        # Be explicit for what exceptions are 403, the rest bubble as 500
+        except (exception.NotFound, exception.NotAuthorized) as ex:
+            LOG.audit(_("Authentication Failure: %s"), str(ex))
             raise webob.exc.HTTPForbidden()
 
         # Authenticated!
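The centerpiece of this merge is the new `nova/api/direct.py` shown above: services self-register under a path, `Router` exposes them as `/controller/method`, and `Proxy` makes the HTTP endpoint callable like a local object. Below is a minimal usage sketch, not part of the commit, assuming this branch of nova is importable; `EchoService` and its `echo` method are invented purely for illustration.

```python
# A minimal usage sketch for the new Direct API (nova/api/direct.py).
# EchoService is hypothetical; register_service, Router,
# JsonParamsMiddleware and Proxy are the names added in this diff.
from nova import context
from nova.api import direct


class EchoService(object):
    """Toy service used only to demonstrate self-registration."""

    def echo(self, context, message):
        """Return the message that was passed in."""
        return {'message': message}


# register_service() records the handler in direct.ROUTES; Router() maps
# registered routes to /echo/{action} at construction, so register first.
direct.register_service('echo', EchoService())

# JsonParamsMiddleware decodes the 'json' POST parameter into the keyword
# arguments that ServiceWrapper passes to the service method.
app = direct.JsonParamsMiddleware(direct.Router())

# Proxy makes the WSGI endpoint callable as if it were a local object.
proxy = direct.Proxy(app)
# is_admin=True avoids an auth-manager lookup in this standalone sketch.
ctxt = context.RequestContext(user='demo', project='demo', is_admin=True)
print proxy.echo.echo(ctxt, message='hello')  # -> {u'message': u'hello'}
```

Swapping in `PostParamsMiddleware` would accept plain form parameters instead of a JSON blob, and `DelegatedAuthMiddleware` builds the same `openstack.context` from the `X-OpenStack-User` and `X-OpenStack-Project` headers when requests arrive over the wire.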
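The EC2 entry point is reworked in the same file: the old monolithic `API` middleware and its paste factories are gone, replaced by composable pieces (`RequestLogging`, `Lockout`, and `Authenticate` above; `Requestify`, `Authorizer`, and `Executor` in the hunks that follow). A hedged sketch of how they stack; in-tree the pipeline is assembled from paste deploy configuration rather than inline code, and instantiating `CloudController` assumes a configured nova environment:

```python
# Hand-assembled equivalent of the reworked EC2 pipeline after this merge.
# Order matters: Requestify builds the ec2.request that Authorizer and
# Executor consume, and Lockout counts 403s coming back from Authenticate.
from nova.api import ec2

app = ec2.RequestLogging(          # access-log style logging, outermost
    ec2.Lockout(                   # tracks failed authentications
        ec2.Authenticate(          # fills req.environ['ec2.context']
            ec2.Requestify(        # fills req.environ['ec2.request']
                ec2.Authorizer(    # role-checks the APIRequest
                    ec2.Executor()),
                'nova.api.ec2.cloud.CloudController'))))
```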
@@ -154,29 +179,19 @@ class Authenticate(wsgi.Middleware): project=project, remote_address=remote_address) req.environ['ec2.context'] = ctxt + LOG.audit(_('Authenticated Request For %s:%s)'), user.name, + project.name, context=req.environ['ec2.context']) return self.application -class Router(wsgi.Middleware): - - """Add ec2.'controller', .'action', and .'action_args' to WSGI environ.""" +class Requestify(wsgi.Middleware): - def __init__(self, application): - super(Router, self).__init__(application) - self.map = routes.Mapper() - self.map.connect("/{controller_name}/") - self.controllers = dict(Cloud=cloud.CloudController(), - Admin=admin.AdminController()) + def __init__(self, app, controller): + super(Requestify, self).__init__(app) + self.controller = utils.import_class(controller)() @webob.dec.wsgify def __call__(self, req): - # Obtain the appropriate controller and action for this request. - try: - match = self.map.match(req.path_info) - controller_name = match['controller_name'] - controller = self.controllers[controller_name] - except: - raise webob.exc.HTTPNotFound() non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version', 'Timestamp'] args = dict(req.params) @@ -189,13 +204,13 @@ class Router(wsgi.Middleware): except: raise webob.exc.HTTPBadRequest() - _log.debug(_('action: %s') % action) + LOG.debug(_('action: %s'), action) for key, value in args.items(): - _log.debug(_('arg: %s\t\tval: %s') % (key, value)) + LOG.debug(_('arg: %s\t\tval: %s'), key, value) # Success! - req.environ['ec2.controller'] = controller - req.environ['ec2.action'] = action + api_request = apirequest.APIRequest(self.controller, action, args) + req.environ['ec2.request'] = api_request req.environ['ec2.action_args'] = args return self.application @@ -256,13 +271,14 @@ class Authorizer(wsgi.Middleware): @webob.dec.wsgify def __call__(self, req): context = req.environ['ec2.context'] - controller_name = req.environ['ec2.controller'].__class__.__name__ - action = req.environ['ec2.action'] - allowed_roles = self.action_roles[controller_name].get(action, - ['none']) + controller = req.environ['ec2.request'].controller.__class__.__name__ + action = req.environ['ec2.request'].action + allowed_roles = self.action_roles[controller].get(action, ['none']) if self._matches_any_role(context, allowed_roles): return self.application else: + LOG.audit(_("Unauthorized request for controller=%s " + "and action=%s"), controller, action, context=context) raise webob.exc.HTTPUnauthorized() def _matches_any_role(self, context, roles): @@ -289,23 +305,28 @@ class Executor(wsgi.Application): @webob.dec.wsgify def __call__(self, req): context = req.environ['ec2.context'] - controller = req.environ['ec2.controller'] - action = req.environ['ec2.action'] - args = req.environ['ec2.action_args'] - - api_request = apirequest.APIRequest(controller, action) + api_request = req.environ['ec2.request'] result = None try: - result = api_request.send(context, **args) + result = api_request.invoke(context) + except exception.NotFound as ex: + LOG.info(_('NotFound raised: %s'), str(ex), context=context) + return self._error(req, context, type(ex).__name__, str(ex)) except exception.ApiError as ex: - + LOG.exception(_('ApiError raised: %s'), str(ex), context=context) if ex.code: - return self._error(req, ex.code, ex.message) + return self._error(req, context, ex.code, str(ex)) else: - return self._error(req, type(ex).__name__, ex.message) - # TODO(vish): do something more useful with unknown exceptions + 
return self._error(req, context, type(ex).__name__, str(ex)) except Exception as ex: - return self._error(req, type(ex).__name__, str(ex)) + extra = {'environment': req.environ} + LOG.exception(_('Unexpected error raised: %s'), str(ex), + extra=extra, context=context) + return self._error(req, + context, + 'UnknownError', + _('An unknown error has occurred. ' + 'Please try your request again.')) else: resp = webob.Response() resp.status = 200 @@ -313,15 +334,16 @@ class Executor(wsgi.Application): resp.body = str(result) return resp - def _error(self, req, code, message): - logging.error("%s: %s", code, message) + def _error(self, req, context, code, message): + LOG.error("%s: %s", code, message, context=context) resp = webob.Response() resp.status = 400 resp.headers['Content-Type'] = 'text/xml' resp.body = str('<?xml version="1.0"?>\n' - '<Response><Errors><Error><Code>%s</Code>' - '<Message>%s</Message></Error></Errors>' - '<RequestID>?</RequestID></Response>' % (code, message)) + '<Response><Errors><Error><Code>%s</Code>' + '<Message>%s</Message></Error></Errors>' + '<RequestID>%s</RequestID></Response>' % + (code, message, context.request_id)) return resp @@ -343,29 +365,3 @@ class Versions(wsgi.Application): '2009-04-04', ] return ''.join('%s\n' % v for v in versions) - - -def authenticate_factory(global_args, **local_args): - def authenticator(app): - return Authenticate(app) - return authenticator - - -def router_factory(global_args, **local_args): - def router(app): - return Router(app) - return router - - -def authorizer_factory(global_args, **local_args): - def authorizer(app): - return Authorizer(app) - return authorizer - - -def executor_factory(global_args, **local_args): - return Executor() - - -def versions_factory(global_args, **local_args): - return Versions() diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index fac01369e..758b612e8 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -24,9 +24,13 @@ import base64 from nova import db from nova import exception +from nova import log as logging from nova.auth import manager +LOG = logging.getLogger('nova.api.ec2.admin') + + def user_dict(user, base64_file=None): """Convert the user object to a result dict""" if user: @@ -75,17 +79,18 @@ class AdminController(object): return {'userSet': [user_dict(u) for u in manager.AuthManager().get_users()]} - def register_user(self, _context, name, **_kwargs): + def register_user(self, context, name, **_kwargs): """Creates a new user, and returns generated credentials.""" + LOG.audit(_("Creating new user: %s"), name, context=context) return user_dict(manager.AuthManager().create_user(name)) - def deregister_user(self, _context, name, **_kwargs): + def deregister_user(self, context, name, **_kwargs): """Deletes a single user (NOT undoable.) Should throw an exception if the user has instances, volumes, or buckets remaining. 
""" + LOG.audit(_("Deleting user: %s"), name, context=context) manager.AuthManager().delete_user(name) - return True def describe_roles(self, context, project_roles=True, **kwargs): @@ -105,15 +110,27 @@ class AdminController(object): operation='add', **kwargs): """Add or remove a role for a user and project.""" if operation == 'add': + if project: + LOG.audit(_("Adding role %s to user %s for project %s"), role, + user, project, context=context) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, user, + context=context) manager.AuthManager().add_role(user, role, project) elif operation == 'remove': + if project: + LOG.audit(_("Removing role %s from user %s for project %s"), + role, user, project, context=context) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + user, context=context) manager.AuthManager().remove_role(user, role, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True - def generate_x509_for_user(self, _context, name, project=None, **kwargs): + def generate_x509_for_user(self, context, name, project=None, **kwargs): """Generates and returns an x509 certificate for a single user. Is usually called from a client that will wrap this with access and secret key info, and return a zip file. @@ -122,6 +139,8 @@ class AdminController(object): project = name project = manager.AuthManager().get_project(project) user = manager.AuthManager().get_user(name) + LOG.audit(_("Getting x509 for user: %s on project: %s"), name, + project, context=context) return user_dict(user, base64.b64encode(project.get_credentials(user))) def describe_project(self, context, name, **kwargs): @@ -137,6 +156,8 @@ class AdminController(object): def register_project(self, context, name, manager_user, description=None, member_users=None, **kwargs): """Creates a new project""" + LOG.audit(_("Create project %s managed by %s"), name, manager_user, + context=context) return project_dict( manager.AuthManager().create_project( name, @@ -146,6 +167,7 @@ class AdminController(object): def deregister_project(self, context, name): """Permanently deletes a project.""" + LOG.audit(_("Delete project: %s"), name, context=context) manager.AuthManager().delete_project(name) return True @@ -159,11 +181,15 @@ class AdminController(object): **kwargs): """Add or remove a user from a project.""" if operation == 'add': + LOG.audit(_("Adding user %s to project %s"), user, project, + context=context) manager.AuthManager().add_to_project(user, project) elif operation == 'remove': + LOG.audit(_("Removing user %s from project %s"), user, project, + context=context) manager.AuthManager().remove_from_project(user, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True # FIXME(vish): these host commands don't work yet, perhaps some of the diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index a90fbeb0c..78576470a 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -20,13 +20,13 @@ APIRequest class """ -import logging import re # TODO(termie): replace minidom with etree from xml.dom import minidom -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) +from nova import log as logging + +LOG = logging.getLogger("nova.api.request") _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') @@ -83,24 +83,25 @@ def _try_convert(value): class APIRequest(object): 
- def __init__(self, controller, action): + def __init__(self, controller, action, args): self.controller = controller self.action = action + self.args = args - def send(self, context, **kwargs): + def invoke(self, context): try: method = getattr(self.controller, _camelcase_to_underscore(self.action)) except AttributeError: _error = _('Unsupported API request: controller = %s,' 'action = %s') % (self.controller, self.action) - _log.warning(_error) + LOG.exception(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. raise Exception(_error) args = {} - for key, value in kwargs.items(): + for key, value in self.args.items(): parts = key.split(".") key = _camelcase_to_underscore(parts[0]) if isinstance(value, str) or isinstance(value, unicode): @@ -142,7 +143,7 @@ class APIRequest(object): response = xml.toxml() xml.unlink() - _log.debug(response) + LOG.debug(response) return response def _render_dict(self, xml, el, data): @@ -151,7 +152,7 @@ class APIRequest(object): val = data[key] el.appendChild(self._render_data(xml, key, val)) except: - _log.debug(data) + LOG.debug(data) raise def _render_data(self, xml, el_name, data): diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 0c0027287..57d41ed67 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -24,26 +24,28 @@ datastore. import base64 import datetime -import logging -import re +import IPy import os +import urllib +from nova import compute from nova import context -import IPy -from nova import compute from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import network -from nova import rpc from nova import utils from nova import volume from nova.compute import instance_types FLAGS = flags.FLAGS +flags.DECLARE('service_down_time', 'nova.scheduler.driver') + +LOG = logging.getLogger("nova.api.cloud") InvalidInputException = exception.InvalidInputException @@ -72,17 +74,13 @@ def _gen_key(context, user_id, key_name): def ec2_id_to_id(ec2_id): - """Convert an ec2 ID (i-[base 36 number]) to an instance id (int)""" - return int(ec2_id[2:], 36) + """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)""" + return int(ec2_id.split('-')[-1], 16) -def id_to_ec2_id(instance_id): - """Convert an instance ID (int) to an ec2 ID (i-[base 36 number])""" - digits = [] - while instance_id != 0: - instance_id, remainder = divmod(instance_id, 36) - digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) - return "i-%s" % ''.join(reversed(digits)) +def id_to_ec2_id(instance_id, template='i-%08x'): + """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])""" + return template % instance_id class CloudController(object): @@ -94,8 +92,11 @@ class CloudController(object): self.image_service = utils.import_object(FLAGS.image_service) self.network_api = network.API() self.volume_api = volume.API() - self.compute_api = compute.API(self.image_service, self.network_api, - self.volume_api) + self.compute_api = compute.API( + network_api=self.network_api, + image_service=self.image_service, + volume_api=self.volume_api, + hostname_factory=id_to_ec2_id) self.setup() def __str__(self): @@ -131,14 +132,11 @@ class CloudController(object): result[key] = [line] return result - def _trigger_refresh_security_group(self, context, security_group): - nodes = set([instance['host'] for instance in security_group.instances - if instance['host'] is not None]) - for node in nodes: - rpc.cast(context, - '%s.%s' % 
(FLAGS.compute_topic, node), - {"method": "refresh_security_group", - "args": {"security_group_id": security_group.id}}) + def _get_availability_zone_by_host(self, context, host): + services = db.service_get_all_by_host(context, host) + if len(services) > 0: + return services[0]['availability_zone'] + return 'unknown zone' def get_metadata(self, address): ctxt = context.get_admin_context() @@ -152,6 +150,8 @@ class CloudController(object): else: keys = '' hostname = instance_ref['hostname'] + host = instance_ref['host'] + availability_zone = self._get_availability_zone_by_host(ctxt, host) floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = id_to_ec2_id(instance_ref['id']) @@ -174,8 +174,7 @@ class CloudController(object): 'local-hostname': hostname, 'local-ipv4': address, 'kernel-id': instance_ref['kernel_id'], - # TODO(vish): real zone - 'placement': {'availability-zone': 'nova'}, + 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', 'public-keys': keys, @@ -199,15 +198,33 @@ class CloudController(object): return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): - return {'availabilityZoneInfo': [{'zoneName': 'nova', - 'zoneState': 'available'}]} + enabled_services = db.service_get_all(context) + disabled_services = db.service_get_all(context, True) + available_zones = [] + for zone in [service.availability_zone for service + in enabled_services]: + if not zone in available_zones: + available_zones.append(zone) + not_available_zones = [] + for zone in [service.availability_zone for service in disabled_services + if not service['availability_zone'] in available_zones]: + if not zone in not_available_zones: + not_available_zones.append(zone) + result = [] + for zone in available_zones: + result.append({'zoneName': zone, + 'zoneState': "available"}) + for zone in not_available_zones: + result.append({'zoneName': zone, + 'zoneState': "not available"}) + return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context) - now = db.get_time() + now = datetime.datetime.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: @@ -237,16 +254,17 @@ class CloudController(object): name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix, host, - FLAGS.cc_port, + FLAGS.ec2_port, FLAGS.ec2_suffix) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix, - FLAGS.cc_host, - FLAGS.cc_port, + FLAGS.ec2_host, + FLAGS.ec2_port, FLAGS.ec2_suffix)}] + return {'regionInfo': regions} def describe_snapshots(self, context, @@ -282,6 +300,7 @@ class CloudController(object): return {'keypairsSet': result} def create_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user.id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], @@ -289,6 +308,7 @@ class CloudController(object): # TODO(vish): when context is no longer an object, pass it here def delete_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, context.user.id, key_name) 
except exception.NotFound: @@ -349,6 +369,7 @@ class CloudController(object): values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. + cidr_ip = urllib.unquote(cidr_ip).decode() IPy.IP(cidr_ip) values['cidr'] = cidr_ip else: @@ -395,6 +416,8 @@ class CloudController(object): return False def revoke_security_group_ingress(self, context, group_name, **kwargs): + LOG.audit(_("Revoke security group ingress %s"), group_name, + context=context) self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, @@ -402,8 +425,8 @@ class CloudController(object): criteria = self._revoke_rule_args_to_dict(context, **kwargs) if criteria == None: - raise exception.ApiError(_("No rule for the specified " - "parameters.")) + raise exception.ApiError(_("Not enough parameters to build a " + "valid rule.")) for rule in security_group.rules: match = True @@ -412,7 +435,8 @@ class CloudController(object): match = False if match: db.security_group_rule_destroy(context, rule['id']) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) @@ -421,12 +445,17 @@ class CloudController(object): # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name, **kwargs): + LOG.audit(_("Authorize security group ingress %s"), group_name, + context=context) self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) values = self._revoke_rule_args_to_dict(context, **kwargs) + if values is None: + raise exception.ApiError(_("Not enough parameters to build a " + "valid rule.")) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): @@ -435,7 +464,8 @@ class CloudController(object): security_group_rule = db.security_group_rule_create(context, values) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True @@ -457,6 +487,7 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): + LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError(_('group %s already exists') % group_name) @@ -471,6 +502,7 @@ class CloudController(object): group_ref)]} def delete_security_group(self, context, group_name, **kwargs): + LOG.audit(_("Delete security group %s"), group_name, context=context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -478,22 +510,26 @@ class CloudController(object): return True def get_console_output(self, context, instance_id, **kwargs): + LOG.audit(_("Get console output for instance %s"), instance_id, + context=context) # instance_id is passed in as a list of instances ec2_id = instance_id[0] instance_id = ec2_id_to_id(ec2_id) - instance_ref = self.compute_api.get(context, instance_id) - output = rpc.call(context, - '%s.%s' % (FLAGS.compute_topic, - instance_ref['host']), - {"method": "get_console_output", - "args": 
{"instance_id": instance_ref['id']}}) - + output = self.compute_api.get_console_output( + context, instance_id=instance_id) now = datetime.datetime.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} + def get_ajax_console(self, context, instance_id, **kwargs): + ec2_id = instance_id[0] + internal_id = ec2_id_to_id(ec2_id) + return self.compute_api.get_ajax_console(context, internal_id) + def describe_volumes(self, context, volume_id=None, **kwargs): + if volume_id: + volume_id = [ec2_id_to_id(x) for x in volume_id] volumes = self.volume_api.get_all(context) # NOTE(vish): volume_id is an optional list of volume ids to filter by. volumes = [self._format_volume(context, v) for v in volumes @@ -509,7 +545,7 @@ class CloudController(object): instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} - v['volumeId'] = volume['id'] + v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x') v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ -527,7 +563,8 @@ class CloudController(object): 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', - 'volume_id': volume['ec2_id']}] + 'volumeId': id_to_ec2_id(volume['id'], + 'vol-%08x')}] else: v['attachmentSet'] = [{}] @@ -536,19 +573,22 @@ class CloudController(object): return v def create_volume(self, context, size, **kwargs): + LOG.audit(_("Create volume of %s GB"), size, context=context) volume = self.volume_api.create(context, size, kwargs.get('display_name'), kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. - return {'volumeSet': [self._format_volume(context, dict(volume_ref))]} + return {'volumeSet': [self._format_volume(context, dict(volume))]} def delete_volume(self, context, volume_id, **kwargs): - self.volume_api.delete(context, volume_id) + volume_id = ec2_id_to_id(volume_id) + self.volume_api.delete(context, volume_id=volume_id) return True def update_volume(self, context, volume_id, **kwargs): + volume_id = ec2_id_to_id(volume_id) updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: @@ -559,24 +599,33 @@ class CloudController(object): return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - self.compute_api.attach_volume(context, instance_id, volume_id, device) + volume_id = ec2_id_to_id(volume_id) + instance_id = ec2_id_to_id(instance_id) + LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id, + instance_id, device, context=context) + self.compute_api.attach_volume(context, + instance_id=instance_id, + volume_id=volume_id, + device=device) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], - 'instanceId': instance_id, + 'instanceId': id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': volume_id} + 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')} def detach_volume(self, context, volume_id, **kwargs): + volume_id = ec2_id_to_id(volume_id) + LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) - instance = self.compute_api.detach_volume(context, volume_id) + instance = self.compute_api.detach_volume(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': 
volume['mountpoint'], 'instanceId': id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': volume_id} + 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')} def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -586,19 +635,32 @@ class CloudController(object): return [{label: x} for x in lst] def describe_instances(self, context, **kwargs): - return self._format_describe_instances(context) + return self._format_describe_instances(context, **kwargs) + + def describe_instances_v6(self, context, **kwargs): + kwargs['use_v6'] = True + return self._format_describe_instances(context, **kwargs) - def _format_describe_instances(self, context): - return {'reservationSet': self._format_instances(context)} + def _format_describe_instances(self, context, **kwargs): + return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): - i = self._format_instances(context, reservation_id) + i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] - def _format_instances(self, context, **kwargs): + def _format_instances(self, context, instance_id=None, **kwargs): + # TODO(termie): this method is poorly named as its name does not imply + # that it will be making a variety of database calls + # rather than simply formatting a bunch of instances that + # were handed to it reservations = {} - instances = self.compute_api.get_all(context, **kwargs) + # NOTE(vish): instance_id is an optional list of ids to filter by + if instance_id: + instance_id = [ec2_id_to_id(x) for x in instance_id] + instances = [self.compute_api.get(context, x) for x in instance_id] + else: + instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -618,10 +680,16 @@ class CloudController(object): if instance['fixed_ip']['floating_ips']: fixed = instance['fixed_ip'] floating_addr = fixed['floating_ips'][0]['address'] + if instance['fixed_ip']['network'] and 'use_v6' in kwargs: + i['dnsNameV6'] = utils.to_global_ipv6( + instance['fixed_ip']['network']['cidr_v6'], + instance['mac_address']) + i['privateDnsName'] = fixed_addr i['publicDnsName'] = floating_addr i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] + if context.user.is_admin(): i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], @@ -632,6 +700,9 @@ class CloudController(object): i['amiLaunchIndex'] = instance['launch_index'] i['displayName'] = instance['display_name'] i['displayDescription'] = instance['display_description'] + host = instance['host'] + zone = self._get_availability_zone_by_host(context, host) + i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] @@ -670,27 +741,35 @@ class CloudController(object): return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): + LOG.audit(_("Allocate address"), context=context) public_ip = self.network_api.allocate_floating_ip(context) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): + LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): + 
LOG.audit(_("Associate address %s to instance %s"), public_ip, + instance_id, context=context) instance_id = ec2_id_to_id(instance_id) - self.compute_api.associate_floating_ip(context, instance_id, public_ip) + self.compute_api.associate_floating_ip(context, + instance_id=instance_id, + address=public_ip) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): + LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) instances = self.compute_api.create(context, - instance_types.get_by_type(kwargs.get('instance_type', None)), - kwargs['image_id'], + instance_type=instance_types.get_by_type( + kwargs.get('instance_type', None)), + image_id=kwargs['image_id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id', None), @@ -701,37 +780,37 @@ class CloudController(object): user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( - 'AvailabilityZone'), - generate_hostname=id_to_ec2_id) + 'AvailabilityZone')) return self._format_run_instances(context, instances[0]['reservation_id']) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified.""" - logging.debug("Going to start terminating instances") + LOG.debug(_("Going to start terminating instances")) for ec2_id in instance_id: instance_id = ec2_id_to_id(ec2_id) - self.compute_api.delete(context, instance_id) + self.compute_api.delete(context, instance_id=instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" + LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: instance_id = ec2_id_to_id(ec2_id) - self.compute_api.reboot(context, instance_id) + self.compute_api.reboot(context, instance_id=instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" instance_id = ec2_id_to_id(instance_id) - self.compute_api.rescue(context, instance_id) + self.compute_api.rescue(context, instance_id=instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" instance_id = ec2_id_to_id(instance_id) - self.compute_api.unrescue(context, instance_id) + self.compute_api.unrescue(context, instance_id=instance_id) return True def update_instance(self, context, ec2_id, **kwargs): @@ -742,7 +821,7 @@ class CloudController(object): changes[field] = kwargs[field] if changes: instance_id = ec2_id_to_id(ec2_id) - self.compute_api.update(context, instance_id, **kwargs) + self.compute_api.update(context, instance_id=instance_id, **kwargs) return True def describe_images(self, context, image_id=None, **kwargs): @@ -753,6 +832,7 @@ class CloudController(object): return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): + LOG.audit(_("De-registering image %s"), image_id, context=context) self.image_service.deregister(context, image_id) return {'imageId': image_id} @@ -760,7 +840,8 @@ class CloudController(object): if image_location is None and 'name' in kwargs: 
image_location = kwargs['name'] image_id = self.image_service.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) + LOG.audit(_("Registered image %s with id %s"), image_location, + image_id, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): @@ -788,6 +869,7 @@ class CloudController(object): raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: raise exception.ApiError(_('operation_type must be add or remove')) + LOG.audit(_("Updating image %s publicity"), image_id, context=context) return self.image_service.modify(context, image_id, operation_type) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index a57a6698a..6fb441656 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -18,19 +18,20 @@ """Metadata request handler.""" -import logging - import webob.dec import webob.exc +from nova import log as logging from nova import flags +from nova import wsgi from nova.api.ec2 import cloud +LOG = logging.getLogger('nova.api.ec2.metadata') FLAGS = flags.FLAGS -class MetadataRequestHandler(object): +class MetadataRequestHandler(wsgi.Application): """Serve metadata from the EC2 API.""" def print_data(self, data): @@ -72,14 +73,9 @@ class MetadataRequestHandler(object): remote_address = req.headers.get('X-Forwarded-For', remote_address) meta_data = cc.get_metadata(remote_address) if meta_data is None: - logging.error(_('Failed to get metadata for ip: %s') % - remote_address) + LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) if data is None: raise webob.exc.HTTPNotFound() return self.print_data(data) - - -def metadata_factory(global_args, **local_args): - return MetadataRequestHandler() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index a1430caed..f2caac483 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -20,59 +20,41 @@ WSGI middleware for OpenStack API controllers. 
""" -import time - -import logging import routes -import traceback import webob.dec import webob.exc -import webob -from nova import context from nova import flags -from nova import utils +from nova import log as logging from nova import wsgi from nova.api.openstack import faults from nova.api.openstack import backup_schedules +from nova.api.openstack import consoles from nova.api.openstack import flavors from nova.api.openstack import images -from nova.api.openstack import ratelimiting from nova.api.openstack import servers -from nova.api.openstack import sharedipgroups +from nova.api.openstack import shared_ip_groups +LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS -flags.DEFINE_string('os_api_auth', - 'nova.api.openstack.auth.AuthMiddleware', - 'The auth mechanism to use for the OpenStack API implemenation') - -flags.DEFINE_string('os_api_ratelimiting', - 'nova.api.openstack.ratelimiting.RateLimitingMiddleware', - 'Default ratelimiting implementation for the Openstack API') - +flags.DEFINE_string('os_krm_mapping_file', + 'krm_mapping.json', + 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.') flags.DEFINE_bool('allow_admin_api', False, 'When True, this API service will accept admin operations.') -class API(wsgi.Middleware): - """WSGI entry point for all OpenStack API requests.""" - - def __init__(self): - auth_middleware = utils.import_class(FLAGS.os_api_auth) - ratelimiting_middleware = \ - utils.import_class(FLAGS.os_api_ratelimiting) - app = auth_middleware(ratelimiting_middleware(APIRouter())) - super(API, self).__init__(app) +class FaultWrapper(wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" @webob.dec.wsgify def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: - logging.warn(_("Caught error: %s") % str(ex)) - logging.error(traceback.format_exc()) + LOG.exception(_("Caught error: %s"), str(ex)) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) @@ -83,12 +65,17 @@ class APIRouter(wsgi.Router): and method. 
""" + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one""" + return cls() + def __init__(self): mapper = routes.Mapper() server_members = {'action': 'POST'} if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") + LOG.debug(_("Including admin operations in API.")) server_members['pause'] = 'POST' server_members['unpause'] = 'POST' server_members["diagnostics"] = "GET" @@ -105,12 +92,18 @@ class APIRouter(wsgi.Router): parent_resource=dict(member_name='server', collection_name='servers')) + mapper.resource("console", "consoles", + controller=consoles.Controller(), + parent_resource=dict(member_name='server', + collection_name='servers')) + mapper.resource("image", "images", controller=images.Controller(), collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", controller=flavors.Controller(), collection={'detail': 'GET'}) - mapper.resource("sharedipgroup", "sharedipgroups", - controller=sharedipgroups.Controller()) + mapper.resource("shared_ip_group", "shared_ip_groups", + collection={'detail': 'GET'}, + controller=shared_ip_groups.Controller()) super(APIRouter, self).__init__(mapper) @@ -126,11 +119,3 @@ class Versions(wsgi.Application): "application/xml": { "attributes": dict(version=["status", "id"])}} return wsgi.Serializer(req.environ, metadata).to_content_type(response) - - -def router_factory(global_cof, **local_conf): - return APIRouter() - - -def versions_factory(global_conf, **local_conf): - return Versions() diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 00e817c8d..1dfdd5318 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -134,9 +134,3 @@ class AuthMiddleware(wsgi.Middleware): token = self.db.auth_create_token(ctxt, token_dict) return token, user return None, None - - -def auth_factory(global_conf, **local_conf): - def auth(app): - return AuthMiddleware(app) - return auth diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index fcc07bdd3..197125d86 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -15,7 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. +import logging import time + from webob import exc from nova import wsgi @@ -46,8 +48,8 @@ class Controller(wsgi.Controller): def create(self, req, server_id): """ No actual update method required, since the existing API allows both create and update through a POST """ - return faults.Fault(exc.HTTPNotFound()) + return faults.Fault(exc.HTTPNotImplemented()) def delete(self, req, server_id, id): """ Deletes an existing backup schedule """ - return faults.Fault(exc.HTTPNotFound()) + return faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index ac0572c96..037ed47a0 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import exception + def limited(items, req): """Return a slice of items according to requested offset and limit. @@ -34,3 +36,25 @@ def limited(items, req): limit = min(1000, limit) range_end = offset + limit return items[offset:range_end] + + +def get_image_id_from_image_hash(image_service, context, image_hash): + """Given an Image ID Hash, return an objectstore Image ID. 
+ + image_service - reference to objectstore compatible image service. + context - security context for image service requests. + image_hash - hash of the image ID. + """ + + # FIX(sandy): This is terribly inefficient. It pulls all images + # from objectstore in order to find the match. ObjectStore + # should have a numeric counterpart to the string ID. + try: + items = image_service.detail(context) + except NotImplementedError: + items = image_service.index(context) + for image in items: + image_id = image['imageId'] + if abs(hash(image_id)) == int(image_hash): + return image_id + raise exception.NotFound(image_hash) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py new file mode 100644 index 000000000..9ebdbe710 --- /dev/null +++ b/nova/api/openstack/consoles.py @@ -0,0 +1,96 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from webob import exc + +from nova import console +from nova import exception +from nova import wsgi +from nova.api.openstack import faults + + +def _translate_keys(cons): + """Coerces a console instance into proper dictionary format """ + pool = cons['pool'] + info = {'id': cons['id'], + 'console_type': pool['console_type']} + return dict(console=info) + + +def _translate_detail_keys(cons): + """Coerces a console instance into proper dictionary format with + correctly mapped attributes """ + pool = cons['pool'] + info = {'id': cons['id'], + 'console_type': pool['console_type'], + 'password': cons['password'], + 'port': cons['port'], + 'host': pool['public_hostname']} + return dict(console=info) + + +class Controller(wsgi.Controller): + """The Consoles Controller for the Openstack API""" + + _serialization_metadata = { + 'application/xml': { + 'attributes': { + 'console': []}}} + + def __init__(self): + self.console_api = console.API() + super(Controller, self).__init__() + + def index(self, req, server_id): + """Returns a list of consoles for this instance""" + consoles = self.console_api.get_consoles( + req.environ['nova.context'], + int(server_id)) + return dict(consoles=[_translate_keys(console) + for console in consoles]) + + def create(self, req, server_id): + """Creates a new console""" + #info = self._deserialize(req.body, req) + self.console_api.create_console( + req.environ['nova.context'], + int(server_id)) + + def show(self, req, server_id, id): + """Shows in-depth information on a specific console""" + try: + console = self.console_api.get_console( + req.environ['nova.context'], + int(server_id), + int(id)) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return _translate_detail_keys(console) + + def update(self, req, server_id, id): + """You can't update a console""" + raise faults.Fault(exc.HTTPNotImplemented()) + + def delete(self, req, server_id, id): + """Deletes a console""" + try: + self.console_api.delete_console(req.environ['nova.context'], + int(server_id), + int(id)) + except 
exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 0b239aab8..9d56bc508 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. +import logging + from webob import exc from nova import compute @@ -26,6 +28,7 @@ from nova.api.openstack import common from nova.api.openstack import faults import nova.image.service + FLAGS = flags.FLAGS @@ -75,7 +78,14 @@ def _translate_status(item): 'decrypting': 'preparing', 'untarring': 'saving', 'available': 'active'} - item['status'] = status_mapping[item['status']] + try: + item['status'] = status_mapping[item['status']] + except KeyError: + # TODO(sirp): Performing translation of status (if necessary) here for + # now. Perhaps this should really be done in EC2 API and + # S3ImageService + pass + return item @@ -88,6 +98,14 @@ def _filter_keys(item, keys): return dict((k, v) for k, v in item.iteritems() if k in keys) +def _convert_image_id_to_hash(image): + if 'imageId' in image: + # Convert EC2-style ID (i-blah) to Rackspace-style (int) + image_id = abs(hash(image['imageId'])) + image['imageId'] = image_id + image['id'] = image_id + + class Controller(wsgi.Controller): _serialization_metadata = { @@ -112,6 +130,9 @@ class Controller(wsgi.Controller): items = self._service.detail(req.environ['nova.context']) except NotImplementedError: items = self._service.index(req.environ['nova.context']) + for image in items: + _convert_image_id_to_hash(image) + items = common.limited(items, req) items = [_translate_keys(item) for item in items] items = [_translate_status(item) for item in items] @@ -119,7 +140,12 @@ class Controller(wsgi.Controller): def show(self, req, id): """Return data about the given image id""" - return dict(image=self._service.show(req.environ['nova.context'], id)) + image_id = common.get_image_id_from_image_hash(self._service, + req.environ['nova.context'], id) + + image = self._service.show(req.environ['nova.context'], image_id) + _convert_image_id_to_hash(image) + return dict(image=image) def delete(self, req, id): # Only public images are supported for now. @@ -130,7 +156,11 @@ class Controller(wsgi.Controller): env = self._deserialize(req.body, req) instance_id = env["image"]["serverId"] name = env["image"]["name"] - return compute.API().snapshot(context, instance_id, name) + + image_meta = compute.API().snapshot( + context, instance_id, name) + + return dict(image=image_meta) def update(self, req, id): # Users may not modify public images, and that's all that diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 81b83142f..cbb4b897e 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -219,9 +219,3 @@ class WSGIAppProxy(object): # No delay return None return float(resp.getheader('X-Wait-Seconds')) - - -def ratelimit_factory(global_conf, **local_conf): - def rl(app): - return RateLimitingMiddleware(app) - return rl diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ce64ac7ad..8cbcebed2 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,14 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. 
-import logging +import json import traceback from webob import exc from nova import compute from nova import exception +from nova import flags +from nova import log as logging from nova import wsgi +from nova import utils from nova.api.openstack import common from nova.api.openstack import faults from nova.auth import manager as auth_manager @@ -35,6 +38,9 @@ LOG = logging.getLogger('server') LOG.setLevel(logging.DEBUG) +FLAGS = flags.FLAGS + + def _translate_detail_keys(inst): """ Coerces into dictionary format, mapping everything to Rackspace-like attributes for return""" @@ -44,7 +50,7 @@ def _translate_detail_keys(inst): power_state.RUNNING: 'active', power_state.BLOCKED: 'active', power_state.SUSPENDED: 'suspended', - power_state.PAUSED: 'error', + power_state.PAUSED: 'paused', power_state.SHUTDOWN: 'active', power_state.SHUTOFF: 'active', power_state.CRASHED: 'error'} @@ -81,6 +87,7 @@ class Controller(wsgi.Controller): def __init__(self): self.compute_api = compute.API() + self._image_service = utils.import_object(FLAGS.image_service) super(Controller, self).__init__() def index(self, req): @@ -117,6 +124,18 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() + def _get_kernel_ramdisk_from_image(self, image_id): + mapping_filename = FLAGS.os_krm_mapping_file + + with open(mapping_filename) as f: + mapping = json.load(f) + if image_id in mapping: + return mapping[image_id] + + raise exception.NotFound( + _("No entry for image '%s' in mapping file '%s'") % + (image_id, mapping_filename)) + def create(self, req): """ Creates a new server for a given user """ env = self._deserialize(req.body, req) @@ -125,10 +144,15 @@ class Controller(wsgi.Controller): key_pair = auth_manager.AuthManager.get_key_pairs( req.environ['nova.context'])[0] + image_id = common.get_image_id_from_image_hash(self._image_service, + req.environ['nova.context'], env['server']['imageId']) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id) instances = self.compute_api.create( req.environ['nova.context'], instance_types.get_by_flavor_id(env['server']['flavorId']), - env['server']['imageId'], + image_id, + kernel_id=kernel_id, + ramdisk_id=ramdisk_id, display_name=env['server']['name'], display_description=env['server']['name'], key_name=key_pair['name'], @@ -141,15 +165,18 @@ class Controller(wsgi.Controller): if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) + ctxt = req.environ['nova.context'] update_dict = {} if 'adminPass' in inst_dict['server']: update_dict['admin_pass'] = inst_dict['server']['adminPass'] + try: + self.compute_api.set_admin_password(ctxt, id) + except exception.TimeoutException, e: + return exc.HTTPRequestTimeout() if 'name' in inst_dict['server']: update_dict['display_name'] = inst_dict['server']['name'] - try: - self.compute_api.update(req.environ['nova.context'], id, - **update_dict) + self.compute_api.update(ctxt, id, **update_dict) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() @@ -158,6 +185,7 @@ class Controller(wsgi.Controller): """ Multi-purpose method used to reboot, rebuild, and resize a server """ input_dict = self._deserialize(req.body, req) + #TODO(sandy): rebuild/resize not supported. 
try: reboot_type = input_dict['reboot']['type'] except Exception: @@ -170,6 +198,50 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def lock(self, req, id): + """ + lock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.lock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unlock(self, req, id): + """ + unlock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.unlock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::unlock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def get_lock(self, req, id): + """ + return the boolean state of (instance with id)'s lock + + """ + context = req.environ['nova.context'] + try: + self.compute_api.get_lock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::get_lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] @@ -177,7 +249,7 @@ class Controller(wsgi.Controller): self.compute_api.pause(ctxt, id) except: readable = traceback.format_exc() - logging.error(_("Compute.api::pause %s"), readable) + LOG.exception(_("Compute.api::pause %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -188,7 +260,7 @@ class Controller(wsgi.Controller): self.compute_api.unpause(ctxt, id) except: readable = traceback.format_exc() - logging.error(_("Compute.api::unpause %s"), readable) + LOG.exception(_("Compute.api::unpause %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -199,7 +271,7 @@ class Controller(wsgi.Controller): self.compute_api.suspend(context, id) except: readable = traceback.format_exc() - logging.error(_("compute.api::suspend %s"), readable) + LOG.exception(_("compute.api::suspend %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -210,10 +282,19 @@ class Controller(wsgi.Controller): self.compute_api.resume(context, id) except: readable = traceback.format_exc() - logging.error(_("compute.api::resume %s"), readable) + LOG.exception(_("compute.api::resume %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def get_ajax_console(self, req, id): + """ Returns a url to an instance's ajaxterm console. 
""" + try: + self.compute_api.get_ajax_console(req.environ['nova.context'], + int(id)) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + def diagnostics(self, req, id): """Permit Admins to retrieve server diagnostics.""" ctxt = req.environ["nova.context"] @@ -222,4 +303,13 @@ class Controller(wsgi.Controller): def actions(self, req, id): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] - return self.compute_api.get_actions(ctxt, id) + items = self.compute_api.get_actions(ctxt, id) + actions = [] + # TODO(jk0): Do not do pre-serialization here once the default + # serializer is updated + for item in items: + actions.append(dict( + created_at=str(item.created_at), + action=item.action, + error=item.error)) + return dict(actions=actions) diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/shared_ip_groups.py index 845f5bead..bd3cc23a8 100644 --- a/nova/api/openstack/sharedipgroups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. +import logging + from webob import exc from nova import wsgi @@ -29,7 +31,7 @@ def _translate_keys(inst): def _translate_detail_keys(inst): """ Coerces a shared IP group instance into proper dictionary format with correctly mapped attributes """ - return dict(sharedIpGroup=inst) + return dict(sharedIpGroups=inst) class Controller(wsgi.Controller): @@ -54,12 +56,12 @@ class Controller(wsgi.Controller): def delete(self, req, id): """ Deletes a Shared IP Group """ - raise faults.Fault(exc.HTTPNotFound()) + raise faults.Fault(exc.HTTPNotImplemented()) - def detail(self, req, id): + def detail(self, req): """ Returns a complete list of Shared IP Groups """ return _translate_detail_keys({}) def create(self, req): """ Creates a new Shared IP group """ - raise faults.Fault(exc.HTTPNotFound()) + raise faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index 47e435cb6..0eb6fe588 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -20,7 +20,6 @@ Auth driver using the DB as its backend. """ -import logging import sys from nova import context diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 7616ff112..bc53e0ec6 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,11 +24,11 @@ other backends by creating another class that exposes the same public methods. """ -import logging import sys from nova import exception from nova import flags +from nova import log as logging FLAGS = flags.FLAGS @@ -65,6 +65,8 @@ flags.DEFINE_string('ldap_netadmin', flags.DEFINE_string('ldap_developer', 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') +LOG = logging.getLogger("nova.ldapdriver") + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. 
I'm delaying @@ -117,8 +119,7 @@ class LdapDriver(object): def get_project(self, pid): """Retrieve project by id""" - dn = 'cn=%s,%s' % (pid, - FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(pid) attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) @@ -226,7 +227,8 @@ class LdapDriver(object): ('description', [description]), (LdapDriver.project_attribute, [manager_dn]), ('member', members)] - self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) + dn = self.__project_to_dn(name, search=False) + self.conn.add_s(dn, attr) return self.__to_project(dict(attr)) def modify_project(self, project_id, manager_uid=None, description=None): @@ -244,23 +246,22 @@ class LdapDriver(object): manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) - self.conn.modify_s('cn=%s,%s' % (project_id, - FLAGS.ldap_project_subtree), - attr) + dn = self.__project_to_dn(project_id) + self.conn.modify_s(dn, attr) def add_to_project(self, uid, project_id): """Add user to project""" - dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(project_id) return self.__add_to_group(uid, dn) def remove_from_project(self, uid, project_id): """Remove user from project""" - dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(project_id) return self.__remove_from_group(uid, dn) def is_in_project(self, uid, project_id): """Check if user is in project""" - dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(project_id) return self.__is_in_group(uid, dn) def has_role(self, uid, role, project_id=None): @@ -300,7 +301,7 @@ class LdapDriver(object): roles.append(role) return roles else: - project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + project_dn = self.__project_to_dn(project_id) query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % (LdapDriver.project_pattern, self.__uid_to_dn(uid))) roles = self.__find_objects(project_dn, query) @@ -333,7 +334,7 @@ class LdapDriver(object): def delete_project(self, project_id): """Delete a project""" - project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + project_dn = self.__project_to_dn(project_id) self.__delete_roles(project_dn) self.__delete_group(project_dn) @@ -365,9 +366,10 @@ class LdapDriver(object): def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" - attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') - return attr + dn = FLAGS.ldap_user_subtree + query = ('(&(%s=%s)(objectclass=novaUser))' % + (FLAGS.ldap_user_id_attribute, uid)) + return self.__find_object(dn, query) def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" @@ -418,15 +420,13 @@ class LdapDriver(object): query = '(objectclass=groupOfNames)' return self.__find_object(dn, query) is not None - @staticmethod - def __role_to_dn(role, project_id=None): + def __role_to_dn(self, role, project_id=None): """Convert role to corresponding dn""" if project_id is None: return FLAGS.__getitem__("ldap_%s" % role).value else: - return 'cn=%s,cn=%s,%s' % (role, - project_id, - FLAGS.ldap_project_subtree) + project_dn = self.__project_to_dn(project_id) + return 'cn=%s,%s' % (role, project_dn) def __create_group(self, group_dn, name, uid, description, member_uids=None): @@ -502,8 +502,8 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug(_("Attempted 
to remove the last member of a group. " - "Deleting the group at %s instead."), group_dn) + LOG.debug(_("Attempted to remove the last member of a group. " + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): @@ -532,6 +532,42 @@ class LdapDriver(object): for role_dn in self.__find_role_dns(project_dn): self.__delete_group(role_dn) + def __to_project(self, attr): + """Convert ldap attributes to Project object""" + if attr is None: + return None + member_dns = attr.get('member', []) + return { + 'id': attr['cn'][0], + 'name': attr['cn'][0], + 'project_manager_id': + self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), + 'description': attr.get('description', [None])[0], + 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + + def __uid_to_dn(self, uid, search=True): + """Convert uid to dn""" + # By default return a generated DN + userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s' + % (uid, FLAGS.ldap_user_subtree)) + if search: + query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid)) + user = self.__find_dns(FLAGS.ldap_user_subtree, query) + if len(user) > 0: + userdn = user[0] + return userdn + + def __project_to_dn(self, pid, search=True): + """Convert pid to dn""" + # By default return a generated DN + projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree)) + if search: + query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern)) + project = self.__find_dns(FLAGS.ldap_project_subtree, query) + if len(project) > 0: + projectdn = project[0] + return projectdn + @staticmethod def __to_user(attr): """Convert ldap attributes to User object""" @@ -548,30 +584,11 @@ class LdapDriver(object): else: return None - def __to_project(self, attr): - """Convert ldap attributes to Project object""" - if attr is None: - return None - member_dns = attr.get('member', []) - return { - 'id': attr['cn'][0], - 'name': attr['cn'][0], - 'project_manager_id': - self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), - 'description': attr.get('description', [None])[0], - 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} - @staticmethod def __dn_to_uid(dn): """Convert user dn to uid""" return dn.split(',')[0].split('=')[1] - @staticmethod - def __uid_to_dn(uid): - """Convert uid to dn""" - return (FLAGS.ldap_user_id_attribute + '=%s,%s' - % (uid, FLAGS.ldap_user_subtree)) - class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d3e266952..1652e24e1 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -20,7 +20,6 @@ Nova authentication management """ -import logging import os import shutil import string # pylint: disable-msg=W0402 @@ -33,6 +32,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import signer @@ -70,6 +70,8 @@ flags.DEFINE_string('credential_rc_file', '%src', flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') +LOG = logging.getLogger('nova.auth.manager') + class AuthBase(object): """Base class for objects relating to auth @@ -254,43 +256,51 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info(_('Looking up user: %r'), access_key) + LOG.debug(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) - logging.info('user: %r', user) + LOG.debug('user: %r', user) 
if user == None: + LOG.audit(_("Failed authorization for access key %s"), access_key) raise exception.NotFound(_('No user found for access key %s') % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user if project_id == '': + LOG.debug(_("Using project name = user name (%s)"), user.name) project_id = user.name project = self.get_project(project_id) if project == None: + LOG.audit(_("failed authorization: no project named %s (user=%s)"), + project_id, user.name) raise exception.NotFound(_('No project called %s could be found') % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): + LOG.audit(_("Failed authorization: user %s not admin and not " + "member of project %s"), user.name, project.name) raise exception.NotFound(_('User %s is not a member of project %s') % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user %s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode expected_signature = signer.Signer(user.secret.encode()).generate( params, verb, server_string, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user %s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) return (user, project) @@ -398,6 +408,12 @@ class AuthManager(object): raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: raise exception.NotFound(_("The %s role is global only") % role) + if project: + LOG.audit(_("Adding role %s to user %s in project %s"), role, + User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -418,6 +434,12 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to remove local role. """ + if project: + LOG.audit(_("Removing role %s from user %s on project %s"), + role, User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) @@ -480,6 +502,8 @@ class AuthManager(object): description, member_users) if project_dict: + LOG.audit(_("Created project %s with manager %s"), name, + manager_user) project = Project(**project_dict) return project @@ -496,6 +520,7 @@ class AuthManager(object): @param project: This will be the new description of the project. 
""" + LOG.audit(_("modifying project %s"), Project.safe_id(project)) if manager_user: manager_user = User.safe_id(manager_user) with self.driver() as drv: @@ -505,6 +530,8 @@ class AuthManager(object): def add_to_project(self, user, project): """Add user to project""" + LOG.audit(_("Adding user %s to project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.add_to_project(User.safe_id(user), Project.safe_id(project)) @@ -523,6 +550,8 @@ class AuthManager(object): def remove_from_project(self, user, project): """Removes a user from a project""" + LOG.audit(_("Remove user %s from project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) @@ -549,6 +578,7 @@ class AuthManager(object): def delete_project(self, project): """Deletes a project""" + LOG.audit(_("Deleting project %s"), Project.safe_id(project)) with self.driver() as drv: drv.delete_project(Project.safe_id(project)) @@ -603,13 +633,16 @@ class AuthManager(object): with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) if user_dict: - return User(**user_dict) + rv = User(**user_dict) + LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin) + return rv def delete_user(self, user): """Deletes a user Additionally deletes all users key_pairs""" uid = User.safe_id(user) + LOG.audit(_("Deleting user %s"), uid) db.key_pair_destroy_all_by_user(context.get_admin_context(), uid) with self.driver() as drv: @@ -618,6 +651,12 @@ class AuthManager(object): def modify_user(self, user, access_key=None, secret_key=None, admin=None): """Modify credentials for a user""" uid = User.safe_id(user) + if access_key: + LOG.audit(_("Access Key change for user %s"), uid) + if secret_key: + LOG.audit(_("Secret Key change for user %s"), uid) + if admin is not None: + LOG.audit(_("Admin status set to %r for user %s"), admin, uid) with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) @@ -643,10 +682,9 @@ class AuthManager(object): region, _sep, region_host = item.partition("=") regions[region] = region_host else: - regions = {'nova': FLAGS.cc_host} + regions = {'nova': FLAGS.ec2_host} for region, host in regions.iteritems(): - rc = self.__generate_rc(user.access, - user.secret, + rc = self.__generate_rc(user, pid, use_dmz, host) @@ -666,7 +704,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn(_("No vpn data for project %s"), pid) + LOG.warn(_("No vpn data for project %s"), pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() @@ -683,30 +721,35 @@ class AuthManager(object): if project is None: project = user.id pid = Project.safe_id(project) - return self.__generate_rc(user.access, user.secret, pid, use_dmz) + return self.__generate_rc(user, pid, use_dmz) @staticmethod - def __generate_rc(access, secret, pid, use_dmz=True, host=None): + def __generate_rc(user, pid, use_dmz=True, host=None): """Generate rc file for user""" if use_dmz: - cc_host = FLAGS.cc_dmz + ec2_host = FLAGS.ec2_dmz_host else: - cc_host = FLAGS.cc_host + ec2_host = FLAGS.ec2_host # NOTE(vish): Always use the dmz since it is used from inside the # instance s3_host = FLAGS.s3_dmz if host: s3_host = host - cc_host = host + ec2_host = host rc = open(FLAGS.credentials_template).read() - rc = rc % {'access': access, + rc = rc % {'access': user.access, 'project': pid, - 'secret': secret, - 'ec2': '%s://%s:%s%s' 
% (FLAGS.ec2_prefix, - cc_host, - FLAGS.cc_port, - FLAGS.ec2_suffix), + 'secret': user.secret, + 'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme, + ec2_host, + FLAGS.ec2_port, + FLAGS.ec2_path), 's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port), + 'os': '%s://%s:%s%s' % (FLAGS.osapi_scheme, + ec2_host, + FLAGS.osapi_port, + FLAGS.osapi_path), + 'user': user.name, 'nova': FLAGS.ca_file, 'cert': FLAGS.credential_cert_file, 'key': FLAGS.credential_key_file} diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 1b8ecb173..c53a4acdc 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -10,3 +10,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" +export CLOUD_SERVERS_API_KEY="%(access)s" +export CLOUD_SERVERS_USERNAME="%(user)s" +export CLOUD_SERVERS_URL="%(os)s" + diff --git a/nova/auth/signer.py b/nova/auth/signer.py index f7d29f534..744e315d4 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests. import base64 import hashlib import hmac -import logging import urllib # NOTE(vish): for new boto @@ -54,9 +53,13 @@ import boto # NOTE(vish): for old boto import boto.utils +from nova import log as logging from nova.exception import Error +LOG = logging.getLogger('nova.signer') + + class Signer(object): """Hacked up code from boto/connection.py""" @@ -120,7 +123,7 @@ class Signer(object): def _calc_signature_2(self, params, verb, server_string, path): """Generate AWS signature version 2 string.""" - logging.debug('using _calc_signature_2') + LOG.debug('using _calc_signature_2') string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path) if self.hmac_256: current_hmac = self.hmac_256 @@ -136,13 +139,13 @@ class Signer(object): val = urllib.quote(val, safe='-_~') pairs.append(urllib.quote(key, safe='') + '=' + val) qs = '&'.join(pairs) - logging.debug('query string: %s', qs) + LOG.debug('query string: %s', qs) string_to_sign += qs - logging.debug('string_to_sign: %s', string_to_sign) + LOG.debug('string_to_sign: %s', string_to_sign) current_hmac.update(string_to_sign) b64 = base64.b64encode(current_hmac.digest()) - logging.debug('len(b64)=%d', len(b64)) - logging.debug('base64 encoded digest: %s', b64) + LOG.debug('len(b64)=%d', len(b64)) + LOG.debug('base64 encoded digest: %s', b64) return b64 diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 09361828d..dc6f55af2 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -22,7 +22,6 @@ an instance with it. 
""" -import logging import os import string import tempfile @@ -33,6 +32,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager # TODO(eday): Eventually changes these to something not ec2-specific @@ -51,7 +51,7 @@ flags.DEFINE_string('dmz_mask', _('Netmask to push into openvpn config')) -LOG = logging.getLogger('nova-cloudpipe') +LOG = logging.getLogger('nova.cloudpipe') class CloudPipe(object): @@ -68,8 +68,8 @@ class CloudPipe(object): shellfile = open(FLAGS.boot_script_template, "r") s = string.Template(shellfile.read()) shellfile.close() - boot_script = s.substitute(cc_dmz=FLAGS.cc_dmz, - cc_port=FLAGS.cc_port, + boot_script = s.substitute(cc_dmz=FLAGS.ec2_dmz_host, + cc_port=FLAGS.ec2_port, dmz_net=FLAGS.dmz_net, dmz_mask=FLAGS.dmz_mask, num_vpn=FLAGS.cnt_vpn_clients) diff --git a/nova/compute/api.py b/nova/compute/api.py index 64d47b1ce..a6b99c1cb 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -21,12 +21,13 @@ Handles all requests relating to instances (guest vms). """ import datetime -import logging +import re import time from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import network from nova import quota from nova import rpc @@ -36,6 +37,7 @@ from nova.compute import instance_types from nova.db import base FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.compute.api') def generate_default_hostname(instance_id): @@ -46,7 +48,8 @@ def generate_default_hostname(instance_id): class API(base.Base): """API for interacting with the compute manager.""" - def __init__(self, image_service=None, network_api=None, volume_api=None, + def __init__(self, image_service=None, network_api=None, + volume_api=None, hostname_factory=generate_default_hostname, **kwargs): if not image_service: image_service = utils.import_object(FLAGS.image_service) @@ -57,19 +60,21 @@ class API(base.Base): if not volume_api: volume_api = volume.API() self.volume_api = volume_api + self.hostname_factory = hostname_factory super(API, self).__init__(**kwargs) def get_network_topic(self, context, instance_id): + """Get the network topic for an instance.""" try: instance = self.get(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) + LOG.warning(_("Instance %d was not found in get_network_topic"), + instance_id) raise e host = instance['host'] if not host: - raise exception.Error("Instance %d has no host" % instance_id) + raise exception.Error(_("Instance %d has no host") % instance_id) topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) return rpc.call(context, topic, @@ -80,18 +85,17 @@ class API(base.Base): min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, - generate_hostname=generate_default_hostname): + availability_zone=None, user_data=None): """Create the number of instances requested if quota and other arguments check out ok.""" type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, type_data) if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% + LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"), + context.project_id, min_count) + raise quota.QuotaError(_("Instance quota exceeded. You can only " + "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") is_vpn = image_id == FLAGS.vpn_image_id @@ -105,8 +109,10 @@ class API(base.Base): if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None - logging.debug("Creating a raw instance") + LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) + logging.debug("Using Kernel=%s, Ramdisk=%s" % + (kernel_id, ramdisk_id)) if kernel_id: self.image_service.show(context, kernel_id) if ramdisk_id: @@ -147,11 +153,12 @@ class API(base.Base): 'user_data': user_data or '', 'key_name': key_name, 'key_data': key_data, + 'locked': False, 'availability_zone': availability_zone} elevated = context.elevated() instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) + LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -168,22 +175,27 @@ class API(base.Base): security_group_id) # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: + updates = dict(hostname=self.hostname_factory(instance_id)) + if (not hasattr(instance, 'display_name') or + instance.display_name == None): updates['display_name'] = "Server %s" % instance_id instance = self.update(context, instance_id, **updates) instances.append(instance) - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) + "instance_id": instance_id, + "availability_zone": availability_zone}}) - return instances + for group_id in security_groups: + self.trigger_security_group_members_refresh(elevated, group_id) + + return [dict(x.iteritems()) for x in instances] def ensure_default_security_group(self, context): """ Create security group for the security context if it @@ -202,6 +214,60 @@ class API(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) + def trigger_security_group_rules_refresh(self, context, security_group_id): + """Called when a rule is added to or removed from a security_group""" + + security_group = self.db.security_group_get(context, security_group_id) + + hosts = set() + for instance in security_group['instances']: + if instance['host'] is not None: + hosts.add(instance['host']) + + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_rules", + "args": {"security_group_id": security_group.id}}) + + def trigger_security_group_members_refresh(self, context, group_id): + """Called when a security group gains a new or loses a member + + Sends an update request to each compute node for whom this is + relevant.""" + + # First, we get the security group rules that reference this group as + # the grantee.. + security_group_rules = \ + self.db.security_group_rule_get_by_security_group_grantee( + context, + group_id) + + # ..then we distill the security groups to which they belong.. 
+ security_groups = set() + for rule in security_group_rules: + security_groups.add(rule['parent_group_id']) + + # ..then we find the instances that are members of these groups.. + instances = set() + for security_group in security_groups: + for instance in security_group['instances']: + instances.add(instance['id']) + + # ...then we find the hosts where they live... + hosts = set() + for instance in instances: + if instance['host']: + hosts.add(instance['host']) + + # ...and finally we tell these nodes to refresh their view of this + # particular security group. + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_members", + "args": {"security_group_id": group_id}}) + def update(self, context, instance_id, **kwargs): """Updates the instance in the datastore. @@ -214,20 +280,21 @@ class API(base.Base): :retval None """ - return self.db.instance_update(context, instance_id, kwargs) + rv = self.db.instance_update(context, instance_id, kwargs) + return dict(rv.iteritems()) def delete(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) + LOG.debug(_("Going to try to terminate %s"), instance_id) try: instance = self.get(context, instance_id) except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) + LOG.warning(_("Instance %d was not found during terminate"), + instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) + LOG.warning(_("Instance %d is already being terminated"), + instance_id) return self.update(context, @@ -238,16 +305,15 @@ class API(base.Base): host = instance['host'] if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('terminate_instance', context, + instance_id, host) else: self.db.instance_destroy(context, instance_id) def get(self, context, instance_id): """Get a single instance with the given ID.""" - return self.db.instance_get_by_id(context, instance_id) + rv = self.db.instance_get_by_id(context, instance_id) + return dict(rv.iteritems()) def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): @@ -256,7 +322,7 @@ class API(base.Base): an admin, it will retreive all instances in the system.""" if reservation_id is not None: return self.db.instance_get_all_by_reservation(context, - reservation_id) + reservation_id) if fixed_ip is not None: return self.db.fixed_ip_get_instance(context, fixed_ip) if project_id or not context.is_admin: @@ -269,50 +335,74 @@ class API(base.Base): project_id) return self.db.instance_get_all(context) + def _cast_compute_message(self, method, context, instance_id, host=None, + params=None): + """Generic handler for RPC casts to compute. + + :param params: Optional dictionary of arguments to be passed to the + compute worker + + :retval None + """ + if not params: + params = {} + if not host: + instance = self.get(context, instance_id) + host = instance['host'] + queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) + params['instance_id'] = instance_id + kwargs = {'method': method, 'args': params} + rpc.cast(context, queue, kwargs) + + def _call_compute_message(self, method, context, instance_id, host=None, + params=None): + """Generic handler for RPC calls to compute. 
+ + :param params: Optional dictionary of arguments to be passed to the + compute worker + + :retval: Result returned by compute worker + """ + if not params: + params = {} + if not host: + instance = self.get(context, instance_id) + host = instance["host"] + queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) + params['instance_id'] = instance_id + kwargs = {'method': method, 'args': params} + return rpc.call(context, queue, kwargs) + def snapshot(self, context, instance_id, name): - """Snapshot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "snapshot_instance", - "args": {"instance_id": instance_id, "name": name}}) + """Snapshot the given instance. + + :retval: A dict containing image metadata + """ + data = {'name': name, 'is_public': False} + image_meta = self.image_service.create(context, data) + params = {'image_id': image_meta['id']} + self._cast_compute_message('snapshot_instance', context, instance_id, + params=params) + return image_meta def reboot(self, context, instance_id): """Reboot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('reboot_instance', context, instance_id) def pause(self, context, instance_id): """Pause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('pause_instance', context, instance_id) def unpause(self, context, instance_id): """Unpause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('unpause_instance', context, instance_id) def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" - instance = self.get(context, instance_id) - host = instance["host"] - return rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "get_diagnostics", - "args": {"instance_id": instance_id}}) + return self._call_compute_message( + "get_diagnostics", + context, + instance_id) def get_actions(self, context, instance_id): """Retrieve actions for the given instance.""" @@ -320,39 +410,55 @@ class API(base.Base): def suspend(self, context, instance_id): """suspend the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "suspend_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('suspend_instance', context, instance_id) def resume(self, context, instance_id): """resume the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "resume_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('resume_instance', context, instance_id) def rescue(self, context, 
instance_id): """Rescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('rescue_instance', context, instance_id) def unrescue(self, context, instance_id): """Unrescue the given instance.""" + self._cast_compute_message('unrescue_instance', context, instance_id) + + def set_admin_password(self, context, instance_id): + """Set the root/admin password for the given instance.""" + self._cast_compute_message('set_admin_password', context, instance_id) + + def get_ajax_console(self, context, instance_id): + """Get a url to an AJAX Console""" instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance_id}}) + output = self._call_compute_message('get_ajax_console', + context, + instance_id) + rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic, + {'method': 'authorize_ajax_console', + 'args': {'token': output['token'], 'host': output['host'], + 'port': output['port']}}) + return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url, + output['token'])} + + def get_console_output(self, context, instance_id): + """Get console output for an an instance""" + return self._call_compute_message('get_console_output', + context, + instance_id) + + def lock(self, context, instance_id): + """lock the instance with instance_id""" + self._cast_compute_message('lock_instance', context, instance_id) + + def unlock(self, context, instance_id): + """unlock the instance with instance_id""" + self._cast_compute_message('unlock_instance', context, instance_id) + + def get_lock(self, context, instance_id): + """return the boolean state of (instance with instance_id)'s lock""" + instance = self.get(context, instance_id) + return instance['locked'] def attach_volume(self, context, instance_id, volume_id, device): if not re.match("^/dev/[a-z]d[a-z]+$", device): diff --git a/nova/compute/disk.py b/nova/compute/disk.py deleted file mode 100644 index 814a258cd..000000000 --- a/nova/compute/disk.py +++ /dev/null @@ -1,204 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utility methods to resize, repartition, and modify disk images. - -Includes injection of SSH PGP keys into authorized_keys file. 
- -""" - -import logging -import os -import tempfile - -from nova import exception -from nova import flags - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, - 'minimum size in bytes of root partition') -flags.DEFINE_integer('block_size', 1024 * 1024 * 256, - 'block_size to use for dd') - - -def partition(infile, outfile, local_bytes=0, resize=True, - local_type='ext2', execute=None): - """ - Turns a partition (infile) into a bootable drive image (outfile). - - The first 63 sectors (0-62) of the resulting image is a master boot record. - Infile becomes the first primary partition. - If local bytes is specified, a second primary partition is created and - formatted as ext2. - - :: - - In the diagram below, dashes represent drive sectors. - +-----+------. . .-------+------. . .------+ - | 0 a| b c|d e| - +-----+------. . .-------+------. . .------+ - | mbr | primary partiton | local partition | - +-----+------. . .-------+------. . .------+ - - """ - sector_size = 512 - file_size = os.path.getsize(infile) - if resize and file_size < FLAGS.minimum_root_size: - last_sector = FLAGS.minimum_root_size / sector_size - 1 - execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (infile, last_sector, sector_size)) - execute('e2fsck -fp %s' % infile, check_exit_code=False) - execute('resize2fs %s' % infile) - file_size = FLAGS.minimum_root_size - elif file_size % sector_size != 0: - logging.warn(_("Input partition size not evenly divisible by" - " sector size: %d / %d"), file_size, sector_size) - primary_sectors = file_size / sector_size - if local_bytes % sector_size != 0: - logging.warn(_("Bytes for local storage not evenly divisible" - " by sector size: %d / %d"), local_bytes, sector_size) - local_sectors = local_bytes / sector_size - - mbr_last = 62 # a - primary_first = mbr_last + 1 # b - primary_last = primary_first + primary_sectors - 1 # c - local_first = primary_last + 1 # d - local_last = local_first + local_sectors - 1 # e - last_sector = local_last # e - - # create an empty file - execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (outfile, mbr_last, sector_size)) - - # make mbr partition - execute('parted --script %s mklabel msdos' % outfile) - - # append primary file - execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append' - % (infile, outfile, FLAGS.block_size)) - - # make primary partition - execute('parted --script %s mkpart primary %ds %ds' - % (outfile, primary_first, primary_last)) - - if local_bytes > 0: - # make the file bigger - execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (outfile, last_sector, sector_size)) - # make and format local partition - execute('parted --script %s mkpartfs primary %s %ds %ds' - % (outfile, local_type, local_first, local_last)) - - -def extend(image, size, execute): - file_size = os.path.getsize(image) - if file_size >= size: - return - return execute('truncate -s size %s' % (image,)) - - -def inject_data(image, key=None, net=None, partition=None, execute=None): - """Injects a ssh key and optionally net data into a disk image. - - it will mount the image as a fully partitioned disk and attempt to inject - into the specified partition number. - - If partition is not specified it mounts the image as a single partition. 
- - """ - out, err = execute('sudo losetup --find --show %s' % image) - if err: - raise exception.Error(_('Could not attach image to loopback: %s') - % err) - device = out.strip() - try: - if not partition is None: - # create partition - out, err = execute('sudo kpartx -a %s' % device) - if err: - raise exception.Error(_('Failed to load partition: %s') % err) - mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], - partition) - else: - mapped_device = device - - # We can only loopback mount raw images. If the device isn't there, - # it's normally because it's a .vmdk or a .vdi etc - if not os.path.exists(mapped_device): - raise exception.Error('Mapped device was not found (we can' - ' only inject raw disk images): %s' % - mapped_device) - - # Configure ext2fs so that it doesn't auto-check every N boots - out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) - - tmpdir = tempfile.mkdtemp() - try: - # mount loopback to dir - out, err = execute( - 'sudo mount %s %s' % (mapped_device, tmpdir)) - if err: - raise exception.Error(_('Failed to mount filesystem: %s') - % err) - - try: - if key: - # inject key file - _inject_key_into_fs(key, tmpdir, execute=execute) - if net: - _inject_net_into_fs(net, tmpdir, execute=execute) - finally: - # unmount device - execute('sudo umount %s' % mapped_device) - finally: - # remove temporary directory - execute('rmdir %s' % tmpdir) - if not partition is None: - # remove partitions - execute('sudo kpartx -d %s' % device) - finally: - # remove loopback - execute('sudo losetup --detach %s' % device) - - -def _inject_key_into_fs(key, fs, execute=None): - """Add the given public ssh key to root's authorized_keys. - - key is an ssh key string. - fs is the path to the base of the filesystem into which to inject the key. - """ - sshdir = os.path.join(fs, 'root', '.ssh') - execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter - execute('sudo chown root %s' % sshdir) - execute('sudo chmod 700 %s' % sshdir) - keyfile = os.path.join(sshdir, 'authorized_keys') - execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') - - -def _inject_net_into_fs(net, fs, execute=None): - """Inject /etc/network/interfaces into the filesystem rooted at fs. - - net is the contents of /etc/network/interfaces. - """ - netdir = os.path.join(os.path.join(fs, 'etc'), 'network') - execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter - execute('sudo chown root:root %s' % netdir) - execute('sudo chmod 755 %s' % netdir) - netfile = os.path.join(netdir, 'interfaces') - execute('sudo tee %s' % netfile, net) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ca6065890..6f09ce674 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,10 +35,15 @@ terminating it. 
""" import datetime +import random +import string import logging +import socket +import functools from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils @@ -51,6 +56,47 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') flags.DEFINE_string('stub_network', False, 'Stub network related code') +flags.DEFINE_integer('password_length', 12, + 'Length of generated admin passwords') +flags.DEFINE_string('console_host', socket.gethostname(), + 'Console proxy host to use to connect to instances on' + 'this host.') + +LOG = logging.getLogger('nova.compute.manager') + + +def checks_instance_lock(function): + """ + decorator used for preventing action against locked instances + unless, of course, you happen to be admin + + """ + + @functools.wraps(function) + def decorated_function(self, context, instance_id, *args, **kwargs): + + LOG.info(_("check_instance_lock: decorating: |%s|"), function, + context=context) + LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), + self, context, instance_id, context=context) + locked = self.get_lock(context, instance_id) + admin = context.is_admin + LOG.info(_("check_instance_lock: locked: |%s|"), locked, + context=context) + LOG.info(_("check_instance_lock: admin: |%s|"), admin, + context=context) + + # if admin or unlocked call function otherwise log error + if admin or not locked: + LOG.info(_("check_instance_lock: executing: |%s|"), function, + context=context) + function(self, context, instance_id, *args, **kwargs) + else: + LOG.error(_("check_instance_lock: not executing |%s|"), + function, context=context) + return False + + return decorated_function class ComputeManager(manager.Manager): @@ -85,6 +131,15 @@ class ComputeManager(manager.Manager): state = power_state.NOSTATE self.db.instance_set_state(context, instance_id, state) + def get_console_topic(self, context, **_kwargs): + """Retrieves the console host for a project on this host + Currently this is just set in the flags for each compute + host.""" + #TODO(mdragon): perhaps make this variable by console_type? + return self.db.queue_get_for(context, + FLAGS.console_topic, + FLAGS.console_host) + def get_network_topic(self, context, **_kwargs): """Retrieves the network host for a project on this host""" # TODO(vish): This method should be memoized. 
This will make @@ -99,10 +154,20 @@ class ComputeManager(manager.Manager): FLAGS.network_topic, host) + def get_console_pool_info(self, context, console_type): + return self.driver.get_console_pool_info(console_type) + + @exception.wrap_exception + def refresh_security_group_rules(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + return self.driver.refresh_security_group_rules(security_group_id) + @exception.wrap_exception - def refresh_security_group(self, context, security_group_id, **_kwargs): - """This call passes stright through to the virtualization driver.""" - self.driver.refresh_security_group(security_group_id) + def refresh_security_group_members(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): @@ -111,7 +176,8 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - logging.debug(_("instance %s: starting..."), instance_id) + LOG.audit(_("instance %s: starting..."), instance_id, + context=context) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -149,8 +215,8 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s: Failed to spawn"), instance_id, + context=context) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -158,17 +224,19 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Terminating instance %s"), instance_id, context=context) if not FLAGS.stub_network: address = self.db.instance_get_floating_address(context, instance_ref['id']) if address: - logging.debug(_("Disassociating address %s") % address) + LOG.debug(_("Disassociating address %s"), address, + context=context) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. @@ -180,15 +248,14 @@ class ComputeManager(manager.Manager): address = self.db.instance_get_fixed_address(context, instance_ref['id']) if address: - logging.debug(_("Deallocating address %s") % address) + LOG.debug(_("Deallocating address %s"), address, + context=context) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here. 
self.network_manager.deallocate_fixed_ip(context.elevated(), address) - logging.debug(_("instance %s: terminating"), instance_id) - volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) @@ -202,20 +269,22 @@ self.db.instance_destroy(context, instance_id) @exception.wrap_exception + @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Rebooting instance %s"), instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s expected: %s)'), + instance_id, + instance_ref['state'], + power_state.RUNNING, + context=context) - logging.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -225,7 +294,7 @@ self._update_state(context, instance_id) @exception.wrap_exception - def snapshot_instance(self, context, instance_id, name): + def snapshot_instance(self, context, instance_id, image_id): """Snapshot an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -235,23 +304,51 @@ # potentially? self._update_state(context, instance_id) - logging.debug(_('instance %s: snapshotting'), instance_ref['name']) + LOG.audit(_('instance %s: snapshotting'), instance_id, + context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to snapshot a non-running ' + 'instance: %s (state: %s expected: %s)'), + instance_id, instance_ref['state'], power_state.RUNNING) - self.driver.snapshot(instance_ref, name) + self.driver.snapshot(instance_ref, image_id) @exception.wrap_exception + @checks_instance_lock + def set_admin_password(self, context, instance_id, new_pass=None): + """Set the root/admin password for an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + if instance_ref['state'] != power_state.RUNNING: + LOG.warn(_('trying to reset the password on a non-running ' + 'instance: %s (state: %s expected: %s)'), + instance_ref['id'], + instance_ref['state'], + power_state.RUNNING) + + LOG.debug(_('instance %s: setting admin password'), + instance_ref['name']) + if new_pass is None: + # Generate a random password + new_pass = self._generate_password(FLAGS.password_length) + + self.driver.set_admin_password(instance_ref, new_pass) + self._update_state(context, instance_id) + + def _generate_password(self, length=20): + """Generate a random sequence of letters and digits + to be used as a password. 
+ """ + chrs = string.letters + string.digits + return "".join([random.choice(chrs) for i in xrange(length)]) + + @exception.wrap_exception + @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: rescuing'), instance_id) + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -261,12 +358,12 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def unrescue_instance(self, context, instance_id): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: unrescuing'), instance_id) + LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -280,12 +377,12 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug('instance %s: pausing', instance_id) + LOG.audit(_('instance %s: pausing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -297,12 +394,12 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug('instance %s: unpausing', instance_id) + LOG.audit(_('instance %s: unpausing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -319,17 +416,20 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: - logging.debug(_("instance %s: retrieving diagnostics"), - instance_id) + LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, + context=context) return self.driver.get_diagnostics(instance_ref) @exception.wrap_exception + @checks_instance_lock def suspend_instance(self, context, instance_id): - """suspend the instance with instance_id""" + """ + suspend the instance with instance_id + + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: suspending'), instance_id) + LOG.audit(_('instance %s: suspending'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'suspending') @@ -340,12 +440,15 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + @checks_instance_lock def resume_instance(self, context, instance_id): - """resume the suspended instance with instance_id""" + """ + resume the suspended instance with instance_id + + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: resuming'), instance_id) + LOG.audit(_('instance %s: resuming'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 
'resuming') @@ -356,21 +459,66 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + def lock_instance(self, context, instance_id): + """ + lock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + LOG.debug(_('instance %s: locking'), instance_id, context=context) + self.db.instance_update(context, instance_id, {'locked': True}) + + @exception.wrap_exception + def unlock_instance(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + LOG.debug(_('instance %s: unlocking'), instance_id, context=context) + self.db.instance_update(context, instance_id, {'locked': False}) + + @exception.wrap_exception + def get_lock(self, context, instance_id): + """ + return the boolean state of (instance with instance_id)'s lock + + """ + context = context.elevated() + LOG.debug(_('instance %s: getting locked state'), instance_id, + context=context) + instance_ref = self.db.instance_get(context, instance_id) + return instance_ref['locked'] + + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, instance_id) - + LOG.audit(_("Get console output for instance %s"), instance_id, + context=context) return self.driver.get_console_output(instance_ref) @exception.wrap_exception + def get_ajax_console(self, context, instance_id): + """Return connection information for an ajax console""" + context = context.elevated() + logging.debug(_("instance %s: getting ajax console"), instance_id) + instance_ref = self.db.instance_get(context, instance_id) + + return self.driver.get_ajax_console(instance_ref) + + @checks_instance_lock def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, + volume_id, mountpoint, context=context) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -385,8 +533,8 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # exception below.
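The @checks_instance_lock decorator applied to many of the methods above is defined earlier in manager.py and does not appear in this hunk; a minimal sketch of the pattern it implements (shape assumed from the lock methods above, not the verbatim definition):

    import functools

    def checks_instance_lock(function):
        """Skip the decorated action when the instance is locked,
        unless the requesting context has admin rights."""
        @functools.wraps(function)
        def decorated_function(self, context, instance_id, *args, **kwargs):
            locked = self.get_lock(context, instance_id)
            if locked and not context.is_admin:
                return False
            return function(self, context, instance_id, *args, **kwargs)
        return decorated_function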
- logging.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint) + LOG.exception(_("instance %s: attach failed %s, removing"), + instance_id, mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -394,17 +542,18 @@ class ComputeManager(manager.Manager): return True @exception.wrap_exception + @checks_instance_lock def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug(_("instance %s: detaching volume %s"), - instance_id, - volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) + LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"), + volume_id, volume_ref['mountpoint'], instance_id, + context=context) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name']) + LOG.warn(_("Detaching volume from unknown instance %s"), + instance_id, context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 60c347a5e..14d0e8ca1 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -25,19 +25,17 @@ Instance Monitoring: """ import datetime -import logging import os -import sys import time import boto import boto.s3 import rrdtool -from twisted.internet import defer from twisted.internet import task from twisted.application import service from nova import flags +from nova import log as logging from nova.virt import connection as virt_connection @@ -91,6 +89,9 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow +LOG = logging.getLogger('nova.compute.monitor') + + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -255,20 +256,20 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. """ - logging.debug(_('updating %s...'), self.instance_id) + LOG.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() if data != None: - logging.debug('CPU: %s', data) + LOG.debug('CPU: %s', data) update_rrd(self, 'cpu', data) data = self.fetch_net_stats() - logging.debug('NET: %s', data) + LOG.debug('NET: %s', data) update_rrd(self, 'net', data) data = self.fetch_disk_stats() - logging.debug('DISK: %s', data) + LOG.debug('DISK: %s', data) update_rrd(self, 'disk', data) # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls @@ -285,7 +286,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception(_('unexpected error during update')) + LOG.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -309,7 +310,7 @@ class Instance(object): self.cputime = float(info['cpu_time']) self.cputime_last_updated = utcnow() - logging.debug('CPU: %d', self.cputime) + LOG.debug('CPU: %d', self.cputime) # Skip calculation on first pass. Need delta to get a meaningful value. if cputime_last_updated == None: @@ -319,17 +320,17 @@ class Instance(object): d = self.cputime_last_updated - cputime_last_updated t = d.days * 86400 + d.seconds - logging.debug('t = %d', t) + LOG.debug('t = %d', t) # Calculate change over time in number of nanoseconds of CPU time used. 
cputime_delta = self.cputime - cputime_last - logging.debug('cputime_delta = %s', cputime_delta) + LOG.debug('cputime_delta = %s', cputime_delta) # Get the number of virtual cpus in this domain. vcpus = int(info['num_cpu']) - logging.debug('vcpus = %d', vcpus) + LOG.debug('vcpus = %d', vcpus) # Calculate CPU % used and cap at 100. return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) @@ -351,8 +352,8 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error(_('Cannot get blockstats for "%s" on "%s"'), - disk, self.instance_id) + LOG.error(_('Cannot get blockstats for "%s" on "%s"'), + disk, self.instance_id) raise return '%d:%d' % (rd, wr) @@ -373,8 +374,8 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error(_('Cannot get ifstats for "%s" on "%s"'), - interface, self.instance_id) + LOG.error(_('Cannot get ifstats for "%s" on "%s"'), + interface, self.instance_id) raise return '%d:%d' % (rx, tx) @@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception(_('unexpected exception getting connection')) + LOG.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -416,14 +417,14 @@ try: self.updateInstances_(conn, domain_ids) except Exception, exn: - logging.exception('updateInstances_') + LOG.exception('updateInstances_') def updateInstances_(self, conn, domain_ids): for domain_id in domain_ids: if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug(_('Found instance: %s'), domain_id) + LOG.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] diff --git a/nova/console/__init__.py b/nova/console/__init__.py new file mode 100644 index 000000000..dfc72cd61 --- /dev/null +++ b/nova/console/__init__.py @@ -0,0 +1,13 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +""" +:mod:`nova.console` -- Console Proxy to set up VM console access (e.g. with xvp) ===================================================== + +.. automodule:: nova.console :platform: Unix :synopsis: Wrapper around console proxies such as xvp to set up + multitenant VM console access .. moduleauthor:: Monsyne Dragon <mdragon@rackspace.com> """ +from nova.console.api import API diff --git a/nova/console/api.py b/nova/console/api.py new file mode 100644 index 000000000..3850d2c44 --- /dev/null +++ b/nova/console/api.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
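For orientation, a minimal caller-side sketch of the console API defined in the file below (the instance id and printed field are illustrative, not part of this patch):

    from nova import context
    from nova import console

    ctxt = context.get_admin_context()
    console_api = console.API()
    # create_console is a cast: it fires an add_console request at the
    # console worker and returns immediately.
    console_api.create_console(ctxt, instance_id=1)
    # A later index call fetches whatever consoles were actually created.
    for c in console_api.get_consoles(ctxt, instance_id=1):
        print c['id']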
+ +""" +Handles ConsoleProxy API requests +""" + +from nova import exception +from nova.db import base + + +from nova import flags +from nova import rpc + + +FLAGS = flags.FLAGS + + +class API(base.Base): + """API for spining up or down console proxy connections""" + + def __init__(self, **kwargs): + super(API, self).__init__(**kwargs) + + def get_consoles(self, context, instance_id): + return self.db.console_get_all_by_instance(context, instance_id) + + def get_console(self, context, instance_id, console_id): + return self.db.console_get(context, console_id, instance_id) + + def delete_console(self, context, instance_id, console_id): + console = self.db.console_get(context, + console_id, + instance_id) + pool = console['pool'] + rpc.cast(context, + self.db.queue_get_for(context, + FLAGS.console_topic, + pool['host']), + {"method": "remove_console", + "args": {"console_id": console['id']}}) + + def create_console(self, context, instance_id): + instance = self.db.instance_get(context, instance_id) + #NOTE(mdragon): If we wanted to return this the console info + # here, as we would need to do a call. + # They can just do an index later to fetch + # console info. I am not sure which is better + # here. + rpc.cast(context, + self._get_console_topic(context, instance['host']), + {"method": "add_console", + "args": {"instance_id": instance_id}}) + + def _get_console_topic(self, context, instance_host): + topic = self.db.queue_get_for(context, + FLAGS.compute_topic, + instance_host) + return rpc.call(context, + topic, + {"method": "get_console_topic", "args": {'fake': 1}}) diff --git a/nova/console/fake.py b/nova/console/fake.py new file mode 100644 index 000000000..7a90d5221 --- /dev/null +++ b/nova/console/fake.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Fake ConsoleProxy driver for tests. 
+""" + +from nova import exception + + +class FakeConsoleProxy(object): + """Fake ConsoleProxy driver.""" + + @property + def console_type(self): + return "fake" + + def setup_console(self, context, console): + """Sets up actual proxies""" + pass + + def teardown_console(self, context, console): + """Tears down actual proxies""" + pass + + def init_host(self): + """Start up any config'ed consoles on start""" + pass + + def generate_password(self, length=8): + """Returns random console password""" + return "fakepass" + + def get_port(self, context): + """get available port for consoles that need one""" + return 5999 + + def fix_pool_password(self, password): + """Trim password to length, and any other massaging""" + return password + + def fix_console_password(self, password): + """Trim password to length, and any other massaging""" + return password diff --git a/nova/console/manager.py b/nova/console/manager.py new file mode 100644 index 000000000..c55ca8e8f --- /dev/null +++ b/nova/console/manager.py @@ -0,0 +1,127 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Console Proxy Service +""" + +import functools +import logging +import socket + +from nova import exception +from nova import flags +from nova import manager +from nova import rpc +from nova import utils + +FLAGS = flags.FLAGS +flags.DEFINE_string('console_driver', + 'nova.console.xvp.XVPConsoleProxy', + 'Driver to use for the console proxy') +flags.DEFINE_boolean('stub_compute', False, + 'Stub calls to compute worker for tests') +flags.DEFINE_string('console_public_hostname', + socket.gethostname(), + 'Publicly visable name for this console host') + + +class ConsoleProxyManager(manager.Manager): + + """ Sets up and tears down any proxy connections needed for accessing + instance consoles securely""" + + def __init__(self, console_driver=None, *args, **kwargs): + if not console_driver: + console_driver = FLAGS.console_driver + self.driver = utils.import_object(console_driver) + super(ConsoleProxyManager, self).__init__(*args, **kwargs) + self.driver.host = self.host + + def init_host(self): + self.driver.init_host() + + @exception.wrap_exception + def add_console(self, context, instance_id, password=None, + port=None, **kwargs): + instance = self.db.instance_get(context, instance_id) + host = instance['host'] + name = instance['name'] + pool = self.get_pool_for_instance_host(context, host) + try: + console = self.db.console_get_by_pool_instance(context, + pool['id'], + instance_id) + except exception.NotFound: + logging.debug("Adding console") + if not password: + password = self.driver.generate_password() + if not port: + port = self.driver.get_port(context) + console_data = {'instance_name': name, + 'instance_id': instance_id, + 'password': password, + 'pool_id': pool['id']} + if port: + console_data['port'] = port + console = self.db.console_create(context, console_data) + self.driver.setup_console(context, 
console) + return console['id'] + + @exception.wrap_exception + def remove_console(self, context, console_id, **_kwargs): + try: + console = self.db.console_get(context, console_id) + except exception.NotFound: + logging.debug(_('Tried to remove non-existent console ' + '%(console_id)s.') % + {'console_id': console_id}) + return + self.db.console_delete(context, console_id) + self.driver.teardown_console(context, console) + + def get_pool_for_instance_host(self, context, instance_host): + context = context.elevated() + console_type = self.driver.console_type + try: + pool = self.db.console_pool_get_by_host_type(context, + instance_host, + self.host, + console_type) + except exception.NotFound: + #NOTE(mdragon): Right now, the only place this info exists is the + # compute worker's flagfile, at least for + # xenserver. Thus we need to ask. + if FLAGS.stub_compute: + pool_info = {'address': '127.0.0.1', + 'username': 'test', + 'password': '1234pass'} + else: + pool_info = rpc.call(context, + self.db.queue_get_for(context, + FLAGS.compute_topic, + instance_host), + {"method": "get_console_pool_info", + "args": {"console_type": console_type}}) + pool_info['password'] = self.driver.fix_pool_password( + pool_info['password']) + pool_info['host'] = self.host + pool_info['public_hostname'] = FLAGS.console_public_hostname + pool_info['console_type'] = self.driver.console_type + pool_info['compute_host'] = instance_host + pool = self.db.console_pool_create(context, pool_info) + return pool diff --git a/nova/console/xvp.conf.template b/nova/console/xvp.conf.template new file mode 100644 index 000000000..695ddbe96 --- /dev/null +++ b/nova/console/xvp.conf.template @@ -0,0 +1,16 @@ +# One time password use with time window +OTP ALLOW IPCHECK HTTP 60 +#if $multiplex_port +MULTIPLEX $multiplex_port +#end if + +#for $pool in $pools +POOL $pool.address + DOMAIN $pool.address + MANAGER root $pool.password + HOST $pool.address + VM - dummy 0123456789ABCDEF + #for $console in $pool.consoles + VM #if $multiplex_port then '-' else $console.port # $console.instance_name $pass_encode($console.password) + #end for +#end for diff --git a/nova/console/xvp.py b/nova/console/xvp.py new file mode 100644 index 000000000..2a76223da --- /dev/null +++ b/nova/console/xvp.py @@ -0,0 +1,194 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +XVP (Xenserver VNC Proxy) driver.
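+Renders the configured conf file from the Cheetah template above and drives the xvp daemon with signals (SIGTERM to stop it, SIGUSR1 to make a running daemon pick up a rewritten config; see _xvp_restart below).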
+""" + +import fcntl +import logging +import os +import signal +import subprocess + +from Cheetah.Template import Template + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import utils + +flags.DEFINE_string('console_xvp_conf_template', + utils.abspath('console/xvp.conf.template'), + 'XVP conf template') +flags.DEFINE_string('console_xvp_conf', + '/etc/xvp.conf', + 'generated XVP conf file') +flags.DEFINE_string('console_xvp_pid', + '/var/run/xvp.pid', + 'XVP master process pid file') +flags.DEFINE_string('console_xvp_log', + '/var/log/xvp.log', + 'XVP log file') +flags.DEFINE_integer('console_xvp_multiplex_port', + 5900, + "port for XVP to multiplex VNC connections on") +FLAGS = flags.FLAGS + + +class XVPConsoleProxy(object): + """Sets up XVP config, and manages xvp daemon""" + + def __init__(self): + self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read() + self.host = FLAGS.host # default, set by manager. + super(XVPConsoleProxy, self).__init__() + + @property + def console_type(self): + return "vnc+xvp" + + def get_port(self, context): + """get available port for consoles that need one""" + #TODO(mdragon): implement port selection for non multiplex ports, + # we are not using that, but someone else may want + # it. + return FLAGS.console_xvp_multiplex_port + + def setup_console(self, context, console): + """Sets up actual proxies""" + self._rebuild_xvp_conf(context.elevated()) + + def teardown_console(self, context, console): + """Tears down actual proxies""" + self._rebuild_xvp_conf(context.elevated()) + + def init_host(self): + """Start up any config'ed consoles on start""" + ctxt = context.get_admin_context() + self._rebuild_xvp_conf(ctxt) + + def fix_pool_password(self, password): + """Trim password to length, and encode""" + return self._xvp_encrypt(password, is_pool_password=True) + + def fix_console_password(self, password): + """Trim password to length, and encode""" + return self._xvp_encrypt(password) + + def generate_password(self, length=8): + """Returns random console password""" + return os.urandom(length * 2).encode('base64')[:length] + + def _rebuild_xvp_conf(self, context): + logging.debug("Rebuilding xvp conf") + pools = [pool for pool in + db.console_pool_get_all_by_host_type(context, self.host, + self.console_type) + if pool['consoles']] + if not pools: + logging.debug("No console pools!") + self._xvp_stop() + return + conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port, + 'pools': pools, + 'pass_encode': self.fix_console_password} + config = str(Template(self.xvpconf_template, searchList=[conf_data])) + self._write_conf(config) + self._xvp_restart() + + def _write_conf(self, config): + logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf) + with open(FLAGS.console_xvp_conf, 'w') as cfile: + cfile.write(config) + + def _xvp_stop(self): + logging.debug("Stopping xvp") + pid = self._xvp_pid() + if not pid: + return + try: + os.kill(pid, signal.SIGTERM) + except OSError: + #if it's already not running, no problem. 
+ pass + + def _xvp_start(self): + if self._xvp_check_running(): + return + logging.debug("Starting xvp") + try: + utils.execute('xvp -p %s -c %s -l %s' % + (FLAGS.console_xvp_pid, + FLAGS.console_xvp_conf, + FLAGS.console_xvp_log)) + except exception.ProcessExecutionError, err: + logging.error("Error starting xvp: %s" % err) + + def _xvp_restart(self): + logging.debug("Restarting xvp") + if not self._xvp_check_running(): + logging.debug("xvp not running...") + self._xvp_start() + else: + pid = self._xvp_pid() + os.kill(pid, signal.SIGUSR1) + + def _xvp_pid(self): + try: + with open(FLAGS.console_xvp_pid, 'r') as pidfile: + pid = int(pidfile.read()) + except IOError: + return None + except ValueError: + return None + return pid + + def _xvp_check_running(self): + pid = self._xvp_pid() + if not pid: + return False + try: + os.kill(pid, 0) + except OSError: + return False + return True + + def _xvp_encrypt(self, password, is_pool_password=False): + """Call xvp to obfuscate passwords for config file. + + Args: + - password: the password to encode, max 8 chars for vm passwords, + and 16 chars for pool passwords. passwords will + be trimmed to max len before encoding. + - is_pool_password: True if this is the XenServer api password + False if it's a VM console password + (xvp uses different keys and max lengths for pool passwords) + + Note that xvp's obfuscation should not be considered 'real' encryption. + It simply DES encrypts the passwords with static keys plainly viewable + in the xvp source code.""" + maxlen = 8 + flag = '-e' + if is_pool_password: + maxlen = 16 + flag = '-x' + #xvp will blow up on passwords that are too long (mdragon) + password = password[:maxlen] + out, err = utils.execute('xvp %s' % flag, process_input=password) + return out.strip() diff --git a/nova/crypto.py b/nova/crypto.py index b8405552d..a34b940f5 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
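Throughout this patch, module-level stdlib logging calls are migrated to per-module nova loggers; a minimal sketch of the convention (logger name and path are illustrative):

    from nova import log as logging

    LOG = logging.getLogger("nova.crypto")

    # Format args are passed to the logger instead of being pre-formatted
    # with %, so the message is only built when the level is enabled.
    LOG.debug("Flags path: %s", "/path/to/ca")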
import base64 import gettext import hashlib -import logging import os import shutil import struct @@ -39,8 +38,10 @@ gettext.install('nova', unicode=1) from nova import context from nova import db from nova import flags +from nova import log as logging +LOG = logging.getLogger("nova.crypto") FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('key_file', @@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder): csrfile = open(inbound, "w") csrfile.write(csr_text) csrfile.close() - logging.debug(_("Flags path: %s") % ca_folder) + LOG.debug(_("Flags path: %s"), ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) diff --git a/nova/db/api.py b/nova/db/api.py index 0fa5eb1e8..f9d561587 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -42,6 +42,10 @@ flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') flags.DEFINE_boolean('enable_new_services', True, 'Services to be added to the available pool on create') +flags.DEFINE_string('instance_name_template', 'instance-%08x', + 'Template string to be used to generate instance names') +flags.DEFINE_string('volume_name_template', 'volume-%08x', + 'Template string to be used to generate volume names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -81,11 +85,21 @@ def service_get(context, service_id): return IMPL.service_get(context, service_id) +def service_get_all(context, disabled=False): + """Get all services.""" + return IMPL.service_get_all(context, None, disabled) + + def service_get_all_by_topic(context, topic): - """Get all compute services for a given topic.""" + """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) +def service_get_all_by_host(context, host): + """Get all services for a given host.""" + return IMPL.service_get_all_by_host(context, host) + + def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count.
@@ -285,6 +299,10 @@ def fixed_ip_get_instance(context, address): return IMPL.fixed_ip_get_instance(context, address) +def fixed_ip_get_instance_v6(context, address): + return IMPL.fixed_ip_get_instance_v6(context, address) + + def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return IMPL.fixed_ip_get_network(context, address) @@ -343,6 +361,10 @@ def instance_get_fixed_address(context, instance_id): return IMPL.instance_get_fixed_address(context, instance_id) +def instance_get_fixed_address_v6(context, instance_id): + return IMPL.instance_get_fixed_address_v6(context, instance_id) + + def instance_get_floating_address(context, instance_id): """Get the first floating ip address of an instance.""" return IMPL.instance_get_floating_address(context, instance_id) @@ -538,6 +560,10 @@ def project_get_network(context, project_id, associate=True): return IMPL.project_get_network(context, project_id) +def project_get_network_v6(context, project_id): + return IMPL.project_get_network_v6(context, project_id) + + ################### @@ -772,6 +798,13 @@ def security_group_rule_get_by_security_group(context, security_group_id): security_group_id) +def security_group_rule_get_by_security_group_grantee(context, + security_group_id): + """Get all rules that grant access to the given security group.""" + return IMPL.security_group_rule_get_by_security_group_grantee(context, + security_group_id) + + def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) @@ -894,3 +927,57 @@ def host_get_networks(context, host): """ return IMPL.host_get_networks(context, host) + + +################## + + +def console_pool_create(context, values): + """Create console pool.""" + return IMPL.console_pool_create(context, values) + + +def console_pool_get(context, pool_id): + """Get a console pool.""" + return IMPL.console_pool_get(context, pool_id) + + +def console_pool_get_by_host_type(context, compute_host, proxy_host, + console_type): + """Fetch a console pool for a given proxy host, compute host, and type.""" + return IMPL.console_pool_get_by_host_type(context, + compute_host, + proxy_host, + console_type) + + +def console_pool_get_all_by_host_type(context, host, console_type): + """Fetch all pools for given proxy host and type.""" + return IMPL.console_pool_get_all_by_host_type(context, + host, + console_type) + + +def console_create(context, values): + """Create a console.""" + return IMPL.console_create(context, values) + + +def console_delete(context, console_id): + """Delete a console.""" + return IMPL.console_delete(context, console_id) + + +def console_get_by_pool_instance(context, pool_id, instance_id): + """Get console entry for a given instance and pool.""" + return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) + + +def console_get_all_by_instance(context, instance_id): + """Get consoles for a given instance.""" + return IMPL.console_get_all_by_instance(context, instance_id) + + +def console_get(context, console_id, instance_id=None): + """Get a specific console (possibly on a given instance).""" + return IMPL.console_get(context, console_id, instance_id) diff --git a/nova/db/migration.py b/nova/db/migration.py new file mode 100644 index 000000000..e54b90cd8 --- /dev/null +++ b/nova/db/migration.py @@ -0,0 +1,38 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# 
Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +"""Database setup and migration commands.""" + +from nova import flags +from nova import utils + +FLAGS = flags.FLAGS +flags.DECLARE('db_backend', 'nova.db.api') + + +IMPL = utils.LazyPluggable(FLAGS['db_backend'], + sqlalchemy='nova.db.sqlalchemy.migration') + + +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) + + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 22aa1cfe6..747015af5 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -15,29 +15,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -""" -SQLAlchemy database backend -""" -import logging -import time - -from sqlalchemy.exc import OperationalError - -from nova import flags -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS - - -for i in xrange(FLAGS.sql_max_retries): - if i > 0: - time.sleep(FLAGS.sql_retry_interval) - - try: - models.register_models() - break - except OperationalError: - logging.exception(_("Data store is unreachable." 
- " Trying again in %d seconds.") % FLAGS.sql_retry_interval) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index aaa07e3c9..b63b84bed 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -135,6 +135,18 @@ def service_get(context, service_id, session=None): @require_admin_context +def service_get_all(context, session=None, disabled=False): + if not session: + session = get_session() + + result = session.query(models.Service).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(disabled=disabled).\ + all() + return result + + +@require_admin_context def service_get_all_by_topic(context, topic): session = get_session() return session.query(models.Service).\ @@ -145,6 +157,15 @@ def service_get_all_by_topic(context, topic): @require_admin_context +def service_get_all_by_host(context, host): + session = get_session() + return session.query(models.Service).\ + filter_by(deleted=False).\ + filter_by(host=host).\ + all() + + +@require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) return session.query(models.Service, func.coalesce(sort_value, 0)).\ @@ -585,6 +606,17 @@ def fixed_ip_get_instance(context, address): return fixed_ip_ref.instance +@require_context +def fixed_ip_get_instance_v6(context, address): + session = get_session() + mac = utils.to_mac(address) + + result = session.query(models.Instance).\ + filter_by(mac_address=mac).\ + first() + return result + + @require_admin_context def fixed_ip_get_network(context, address): fixed_ip_ref = fixed_ip_get_by_address(context, address) @@ -650,7 +682,7 @@ def instance_get(context, instance_id, session=None): if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ @@ -658,7 +690,7 @@ def instance_get(context, instance_id, session=None): elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ @@ -743,13 +775,16 @@ def instance_get_by_id(context, instance_id): if is_admin_context(context): result = session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.floating_ips')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.floating_ips')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ @@ -771,6 +806,17 @@ def instance_get_fixed_address(context, instance_id): @require_context +def instance_get_fixed_address_v6(context, instance_id): + session = get_session() + with session.begin(): + instance_ref = instance_get(context, instance_id, session=session) + network_ref = network_get_by_instance(context, instance_id) + prefix = network_ref.cidr_v6 + mac = instance_ref.mac_address + return utils.to_global_ipv6(prefix, mac) + + +@require_context def 
instance_get_floating_address(context, instance_id): session = get_session() with session.begin(): @@ -840,12 +886,9 @@ def instance_action_create(context, values): def instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() - actions = {} - for action in session.query(models.InstanceActions).\ + return session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ - all(): - actions[action.action] = action.error - return actions + all() ################### @@ -1110,6 +1153,11 @@ def project_get_network(context, project_id, associate=True): return result +@require_context +def project_get_network_v6(context, project_id): + return project_get_network(context, project_id) + + ################### @@ -1582,6 +1630,44 @@ def security_group_rule_get(context, security_group_rule_id, session=None): @require_context +def security_group_rule_get_by_security_group(context, security_group_id, + session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(parent_group_id=security_group_id).\ + all() + else: + # TODO(vish): Join to group and check for project_id + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(parent_group_id=security_group_id).\ + all() + return result + + +@require_context +def security_group_rule_get_by_security_group_grantee(context, + security_group_id, + session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(group_id=security_group_id).\ + all() + else: + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(group_id=security_group_id).\ + all() + return result + + +@require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() security_group_rule_ref.update(values) @@ -1816,3 +1902,111 @@ def host_get_networks(context, host): filter_by(deleted=False).\ filter_by(host=host).\ all() + + +################## + + +def console_pool_create(context, values): + pool = models.ConsolePool() + pool.update(values) + pool.save() + return pool + + +def console_pool_get(context, pool_id): + session = get_session() + result = session.query(models.ConsolePool).\ + filter_by(deleted=False).\ + filter_by(id=pool_id).\ + first() + if not result: + raise exception.NotFound(_("No console pool with id %(pool_id)s") % + {'pool_id': pool_id}) + + return result + + +def console_pool_get_by_host_type(context, compute_host, host, + console_type): + session = get_session() + result = session.query(models.ConsolePool).\ + filter_by(host=host).\ + filter_by(console_type=console_type).\ + filter_by(compute_host=compute_host).\ + filter_by(deleted=False).\ + options(joinedload('consoles')).\ + first() + if not result: + raise exception.NotFound(_('No console pool of type %(type)s ' + 'for compute host %(compute_host)s ' + 'on proxy host %(host)s') % + {'type': console_type, + 'compute_host': compute_host, + 'host': host}) + return result + + +def console_pool_get_all_by_host_type(context, host, console_type): + session = get_session() + return session.query(models.ConsolePool).\ + filter_by(host=host).\ + filter_by(console_type=console_type).\ + 
filter_by(deleted=False).\ + options(joinedload('consoles')).\ + all() + + +def console_create(context, values): + console = models.Console() + console.update(values) + console.save() + return console + + +def console_delete(context, console_id): + session = get_session() + with session.begin(): + # consoles are meant to be transient. (mdragon) + session.execute('delete from consoles ' + 'where id=:id', {'id': console_id}) + + +def console_get_by_pool_instance(context, pool_id, instance_id): + session = get_session() + result = session.query(models.Console).\ + filter_by(pool_id=pool_id).\ + filter_by(instance_id=instance_id).\ + options(joinedload('pool')).\ + first() + if not result: + raise exception.NotFound(_('No console for instance %(instance_id)s ' + 'in pool %(pool_id)s') % + {'instance_id': instance_id, + 'pool_id': pool_id}) + return result + + +def console_get_all_by_instance(context, instance_id): + session = get_session() + results = session.query(models.Console).\ + filter_by(instance_id=instance_id).\ + options(joinedload('pool')).\ + all() + return results + + +def console_get(context, console_id, instance_id=None): + session = get_session() + query = session.query(models.Console).\ + filter_by(id=console_id) + if instance_id: + query = query.filter_by(instance_id=instance_id) + result = query.options(joinedload('pool')).first() + if not result: + idesc = (_("on instance %s") % instance_id) if instance_id else "" + raise exception.NotFound(_("No console with id %(console_id)s" + " %(instance)s") % + {'instance': idesc, + 'console_id': console_id}) + return result diff --git a/nova/db/sqlalchemy/migrate_repo/README b/nova/db/sqlalchemy/migrate_repo/README new file mode 100644 index 000000000..6218f8cac --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/nova/db/sqlalchemy/migrate_repo/__init__.py b/nova/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/__init__.py diff --git a/nova/db/sqlalchemy/migrate_repo/manage.py b/nova/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 000000000..09e340f44 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +from migrate.versioning.shell import main +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/nova/db/sqlalchemy/migrate_repo/migrate.cfg b/nova/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 000000000..2c75fb763 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=nova + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. 
List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py new file mode 100644 index 000000000..a312a7190 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -0,0 +1,547 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +## Table code mostly autogenerated by genmodel.py +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_manageent_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +fixed_ips = Table('fixed_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('network_id', + Integer(), + ForeignKey('networks.id'), + nullable=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('allocated', Boolean(create_constraint=True, name=None)), + Column('leased', Boolean(create_constraint=True, name=None)), + Column('reserved', 
Boolean(create_constraint=True, name=None)), + ) + + +floating_ips = Table('floating_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fixed_ip_id', + Integer(), + ForeignKey('fixed_ips.id'), + nullable=True), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instances = Table('instances', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('internal_id', Integer()), + Column('admin_pass', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('image_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('kernel_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('ramdisk_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('launch_index', Integer()), + Column('key_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('key_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('state', Integer()), + Column('state_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('memory_mb', Integer()), + Column('vcpus', Integer()), + Column('local_gb', Integer()), + Column('hostname', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('reservation_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('mac_address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', 
DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +key_pairs = Table('key_pairs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fingerprint', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_key', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +networks = Table('networks', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('injected', Boolean(create_constraint=True, name=None)), + Column('cidr', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('netmask', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('bridge', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('gateway', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('broadcast', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dns', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vlan', Integer()), + Column('vpn_public_address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vpn_public_port', Integer()), + Column('vpn_private_address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dhcp_start', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + 
String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +projects = Table('projects', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_manager', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id')), + ) + + +quotas = Table('quotas', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instances', Integer()), + Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + ) + + +security_groups = Table('security_groups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +security_group_inst_assoc = Table('security_group_instance_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('security_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('instance_id', Integer(), ForeignKey('instances.id')), + ) + + +security_group_rules = Table('security_group_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('parent_group_id', + Integer(), + ForeignKey('security_groups.id')), + 
Column('protocol', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('group_id', + Integer(), + ForeignKey('security_groups.id')), + ) + + +services = Table('services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('binary', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('topic', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('report_count', Integer(), nullable=False), + Column('disabled', Boolean(create_constraint=True, name=None)), + ) + + +users = Table('users', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('access_key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('secret_key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('is_admin', Boolean(create_constraint=True, name=None)), + ) + + +user_project_association = Table('user_project_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id'), + primary_key=True, + nullable=False), + ) + + +user_project_role_association = Table('user_project_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, 
assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']), + ) + + +user_role_association = Table('user_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ) + + +volumes = Table('volumes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('ec2_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size', Integer()), + Column('availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('mountpoint', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_time', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py new file mode 100644 index 000000000..bd3a3e6f8 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -0,0 +1,209 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. 
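A note on the stub definitions that follow: SQLAlchemy resolves string ForeignKey targets against tables registered on the same MetaData, so a migration that only adds referencing tables can get away with bare stand-ins for the tables it points at. A minimal sketch of the idea, assuming a throwaway SQLite engine (the names here are illustrative, not part of the commit):

    from sqlalchemy import (Column, ForeignKey, Integer, MetaData, String,
                            Table, create_engine)

    meta = MetaData()
    meta.bind = create_engine('sqlite://')

    # Bare stand-in for a table an earlier migration created; only the
    # column named by the ForeignKey below has to be declared on it.
    instances = Table('instances', meta,
                      Column('id', Integer(), primary_key=True,
                             nullable=False))

    # New table whose string ForeignKey resolves against the stub above.
    consoles = Table('consoles', meta,
                     Column('id', Integer(), primary_key=True,
                            nullable=False),
                     Column('instance_name', String(255)),
                     Column('instance_id', Integer(),
                            ForeignKey('instances.id')))

    instances.create()   # in the real migration this table already exists
    consoles.create()    # the migration itself only creates the new table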
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +services = Table('services', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +networks = Table('networks', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# +certificates = Table('certificates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('file_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +consoles = Table('consoles', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', Integer()), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('port', Integer(), nullable=True), + Column('pool_id', + Integer(), + ForeignKey('console_pools.id')), + ) + + +console_pools = Table('console_pools', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('console_type', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_hostname', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('compute_host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instance_actions = Table('instance_actions', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id')), + Column('action', + 
String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('error',
+               Text(length=None, convert_unicode=False, assert_unicode=None,
+                    unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+#
+# Tables to alter
+#
+auth_tokens = Table('auth_tokens', meta,
+        Column('created_at', DateTime(timezone=False)),
+        Column('updated_at', DateTime(timezone=False)),
+        Column('deleted_at', DateTime(timezone=False)),
+        Column('deleted', Boolean(create_constraint=True, name=None)),
+        Column('token_hash',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False),
+               primary_key=True,
+               nullable=False),
+        Column('user_id', Integer()),
+        Column('server_management_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('storage_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        Column('cdn_management_url',
+               String(length=255, convert_unicode=False, assert_unicode=None,
+                      unicode_error=None, _warn_on_bytestring=False)),
+        )
+
+
+instances_availability_zone = Column(
+    'availability_zone',
+    String(length=255, convert_unicode=False, assert_unicode=None,
+           unicode_error=None, _warn_on_bytestring=False))
+
+
+instances_locked = Column('locked',
+                          Boolean(create_constraint=True, name=None))
+
+
+networks_cidr_v6 = Column(
+    'cidr_v6',
+    String(length=255, convert_unicode=False, assert_unicode=None,
+           unicode_error=None, _warn_on_bytestring=False))
+
+networks_ra_server = Column(
+    'ra_server',
+    String(length=255, convert_unicode=False, assert_unicode=None,
+           unicode_error=None, _warn_on_bytestring=False))
+
+
+services_availability_zone = Column(
+    'availability_zone',
+    String(length=255, convert_unicode=False, assert_unicode=None,
+           unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    for table in (certificates, consoles, console_pools, instance_actions):
+        try:
+            table.create()
+        except Exception:
+            logging.info(repr(table))
+            logging.exception('Exception while creating table')
+            raise
+
+    auth_tokens.c.user_id.alter(type=String(length=255,
+                                            convert_unicode=False,
+                                            assert_unicode=None,
+                                            unicode_error=None,
+                                            _warn_on_bytestring=False))
+
+    instances.create_column(instances_availability_zone)
+    instances.create_column(instances_locked)
+    networks.create_column(networks_cidr_v6)
+    networks.create_column(networks_ra_server)
+    services.create_column(services_availability_zone)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/__init__.py b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py
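The upgrade above exercises sqlalchemy-migrate's schema-change helpers: importing migrate monkey-patches Table and Column so that alter() and create_column() work against a live database. A minimal standalone sketch of the same calls, assuming a throwaway SQLite engine (the table and column names are illustrative, not part of this commit):

    from sqlalchemy import (Boolean, Column, Integer, MetaData, String,
                            Table, create_engine)
    from migrate import *   # adds alter()/create_column() to Table/Column

    meta = MetaData()
    meta.bind = create_engine('sqlite://')

    things = Table('things', meta,
                   Column('id', Integer(), primary_key=True, nullable=False),
                   Column('user_id', Integer()))
    things.create()

    # Change an existing column's type, as done for auth_tokens.c.user_id.
    things.c.user_id.alter(type=String(length=255))

    # Attach a brand-new column, as done for instances.locked above.
    things.create_column(Column('locked', Boolean()))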
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
new file mode 100644
index 000000000..33d14827b
--- /dev/null
+++ b/nova/db/sqlalchemy/migration.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import flags
+
+import sqlalchemy
+from migrate.versioning import api as versioning_api
+from migrate.versioning import exceptions as versioning_exceptions
+
+FLAGS = flags.FLAGS
+
+
+def db_sync(version=None):
+    db_version()
+    repo_path = _find_migrate_repo()
+    return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version)
+
+
+def db_version():
+    repo_path = _find_migrate_repo()
+    try:
+        return versioning_api.db_version(FLAGS.sql_connection, repo_path)
+    except versioning_exceptions.DatabaseNotControlledError:
+        # If we aren't version controlled we may already have the database
+        # in the state from before we started version control, check for that
+        # and set up version_control appropriately
+        meta = sqlalchemy.MetaData()
+        engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
+        meta.reflect(bind=engine)
+        try:
+            for table in ('auth_tokens', 'export_devices', 'fixed_ips',
+                          'floating_ips', 'instances', 'iscsi_targets',
+                          'key_pairs', 'networks', 'projects', 'quotas',
+                          'security_group_rules',
+                          'security_group_instance_association', 'services',
+                          'users', 'user_project_association',
+                          'user_project_role_association', 'volumes'):
+                assert table in meta.tables
+            return db_version_control(1)
+        except AssertionError:
+            return db_version_control(0)
+
+
+def db_version_control(version=None):
+    repo_path = _find_migrate_repo()
+    versioning_api.version_control(FLAGS.sql_connection, repo_path, version)
+    return version
+
+
+def _find_migrate_repo():
+    """Get the path for the migrate repository."""
+    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                        'migrate_repo')
+    assert os.path.exists(path)
+    return path
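These helpers are what management tooling is expected to call to bring a deployment's schema up to date; note that db_sync() calls db_version() first, which silently puts a pre-existing, uncontrolled database under version control. A minimal sketch of driving them directly, assuming FLAGS.sql_connection points at a reachable database (the sqlite URL below is an illustrative value):

    from nova import flags
    from nova.db.sqlalchemy import migration

    FLAGS = flags.FLAGS
    FLAGS.sql_connection = 'sqlite:///nova.sqlite'   # illustrative value

    # Apply every migration up to the newest (or pass an explicit version).
    migration.db_sync()

    # Report the current schema version, e.g. 2 once 002_bexar has run.
    print migration.db_version()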
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 62bb1780d..c54ebe3ba 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -90,53 +90,14 @@ class NovaBase(object):
             setattr(self, k, v)
 
     def iteritems(self):
-        """Make the model object behave like a dict"""
-        return iter(self)
-
-
-# TODO(vish): Store images in the database instead of file system
-#class Image(BASE, NovaBase):
-#    """Represents an image in the datastore"""
-#    __tablename__ = 'images'
-#    id = Column(Integer, primary_key=True)
-#    ec2_id = Column(String(12), unique=True)
-#    user_id = Column(String(255))
-#    project_id = Column(String(255))
-#    image_type = Column(String(255))
-#    public = Column(Boolean, default=False)
-#    state = Column(String(255))
-#    location = Column(String(255))
-#    arch = Column(String(255))
-#    default_kernel_id = Column(String(255))
-#    default_ramdisk_id = Column(String(255))
-#
-#    @validates('image_type')
-#    def validate_image_type(self, key, image_type):
-#        assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw'])
-#
-#    @validates('state')
-#    def validate_state(self, key, state):
-#        assert(state in ['available', 'pending', 'disabled'])
-#
-#    @validates('default_kernel_id')
-#    def validate_kernel_id(self, key, val):
-#        if val != 'machine':
-#            assert(val is None)
-#
-#    @validates('default_ramdisk_id')
-#    def validate_ramdisk_id(self, key, val):
-#        if val != 'machine':
-#            assert(val is None)
-#
-#
-# TODO(vish): To make this into its own table, we need a good place to
-#             create the host entries. In config somwhere? Or the first
-#             time any object sets host? This only becomes particularly
-#             important if we need to store per-host data.
-#class Host(BASE, NovaBase):
-#    """Represents a host where services are running"""
-#    __tablename__ = 'hosts'
-#    id = Column(String(255), primary_key=True)
+        """Make the model object behave like a dict.
+
+        Includes attributes from joins."""
+        local = dict(self)
+        joined = dict([(k, v) for k, v in self.__dict__.iteritems()
+                       if not k[0] == '_'])
+        local.update(joined)
+        return local.iteritems()
 
 
 class Service(BASE, NovaBase):
@@ -149,6 +110,7 @@ class Service(BASE, NovaBase):
     topic = Column(String(255))
     report_count = Column(Integer, nullable=False, default=0)
     disabled = Column(Boolean, default=False)
+    availability_zone = Column(String(255), default='nova')
 
 
 class Certificate(BASE, NovaBase):
@@ -168,7 +130,7 @@ class Instance(BASE, NovaBase):
 
     @property
     def name(self):
-        return "instance-%08x" % self.id
+        return FLAGS.instance_name_template % self.id
 
     admin_pass = Column(String(255))
     user_id = Column(String(255))
@@ -224,6 +186,8 @@ class Instance(BASE, NovaBase):
     display_name = Column(String(255))
     display_description = Column(String(255))
 
+    locked = Column(Boolean)
+
     # TODO(vish): see Ewan's email about state improvements, probably
    #             should be in a driver base class or some such
     # vmstate_state = running, halted, suspended, paused
@@ -253,7 +217,7 @@ class Volume(BASE, NovaBase):
 
     @property
     def name(self):
-        return "volume-%08x" % self.id
+        return FLAGS.volume_name_template % self.id
 
     user_id = Column(String(255))
     project_id = Column(String(255))
@@ -408,6 +372,10 @@ class Network(BASE, NovaBase):
     injected = Column(Boolean, default=False)
     cidr = Column(String(255), unique=True)
 
+    cidr_v6 = Column(String(255), unique=True)
+
+    ra_server = Column(String(255))
+
     netmask = Column(String(255))
     bridge = Column(String(255))
     gateway = Column(String(255))
@@ -538,6 +506,31 @@ class FloatingIp(BASE, NovaBase):
     host = Column(String(255))  # , ForeignKey('hosts.id'))
 
 
+class ConsolePool(BASE, NovaBase):
+    """Represents pool of consoles on the same physical node."""
+    __tablename__ = 'console_pools'
+    id = Column(Integer, primary_key=True)
+    address = Column(String(255))
+    username = Column(String(255))
+    password = Column(String(255))
+    console_type = Column(String(255))
+    public_hostname = Column(String(255))
+    host = Column(String(255))
+    compute_host = Column(String(255))
+
+
+class Console(BASE, NovaBase):
+    """Represents a console session for an instance."""
+    __tablename__ = 'consoles'
+    id = Column(Integer, primary_key=True)
+    instance_name = Column(String(255))
+    instance_id = Column(Integer)
+    password = Column(String(255))
+    port = Column(Integer, nullable=True)
+    pool_id = Column(Integer, ForeignKey('console_pools.id'))
+    pool = relationship(ConsolePool, backref=backref('consoles'))
+
+
 def register_models():
     """Register Models and create metadata.
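The reworked iteritems() above folds instance attributes, including joined relationship objects, into the dict-style view of a model, so a row plus its relations can be walked as one mapping. A rough sketch of the behavior it enables, assuming transient (unsaved) objects are enough to demonstrate it and that the attribute values are illustrative:

    from nova.db.sqlalchemy.models import Console, ConsolePool

    pool = ConsolePool(address='192.168.0.1', console_type='xvp')
    console = Console(instance_name='instance-00000001', port=5900)
    console.pool = pool   # set the relationship directly; no session needed

    # iteritems() now yields the column values plus instance attributes
    # such as 'pool', so the joined object rides along.
    for key, value in console.iteritems():
        print key, value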
@@ -550,7 +543,7 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate) # , Image, Host + Project, Certificate, ConsolePool, Console) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/exception.py b/nova/exception.py index 277033e0f..ecd814e5d 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -21,9 +21,8 @@ Nova base exception handling, including decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ -import logging -import sys -import traceback +from nova import log as logging +LOG = logging.getLogger('nova.exception') class ProcessExecutionError(IOError): @@ -77,6 +76,10 @@ class InvalidInputException(Error): pass +class TimeoutException(Error): + pass + + def wrap_exception(f): def _wrap(*args, **kw): try: @@ -84,7 +87,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception(_('Uncaught exception')) + LOG.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 79d8b894d..7c2d7177b 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -18,12 +18,16 @@ """Based a bit on the carrot.backeds.queue backend... but a lot better.""" -import logging import Queue as queue from carrot.backends import base from eventlet import greenthread +from nova import log as logging + + +LOG = logging.getLogger("nova.fakerabbit") + EXCHANGES = {} QUEUES = {} @@ -41,12 +45,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug(_('(%s) publish (key: %s) %s'), - self.name, routing_key, message) + LOG.debug(_('(%s) publish (key: %s) %s'), + self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug(_('Publishing to route %s'), f) + LOG.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -76,19 +80,19 @@ class Backend(base.BaseBackend): def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: - logging.debug(_('Declaring queue %s'), queue) + LOG.debug(_('Declaring queue %s'), queue) QUEUES[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): global EXCHANGES if exchange not in EXCHANGES: - logging.debug(_('Declaring exchange %s'), exchange) + LOG.debug(_('Declaring exchange %s'), exchange) EXCHANGES[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): global EXCHANGES global QUEUES - logging.debug(_('Binding %s to %s with key %s'), + LOG.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) @@ -113,7 +117,7 @@ class Backend(base.BaseBackend): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug(_('Getting from %s: %s'), queue, message) + LOG.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/flags.py b/nova/flags.py index 4b7334927..81e2e36f9 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,8 
+29,6 @@ import sys import gflags -from nova import utils - class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. @@ -202,10 +200,22 @@ def DECLARE(name, module_string, flag_values=FLAGS): "%s not defined by %s" % (name, module_string)) +def _get_my_ip(): + """Returns the actual ip of the local machine.""" + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.gaierror as ex: + return "127.0.0.1" + + # __GLOBAL FLAGS ONLY__ # Define any app-specific flags in their own files, docs at: -# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39 - +# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#a9 +DEFINE_string('my_ip', _get_my_ip(), 'host ip address') DEFINE_list('region_list', [], 'list of region=url pairs separated by commas') @@ -213,16 +223,25 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('glance_port', 9292, 'glance port') -DEFINE_string('glance_host', utils.get_my_ip(), 'glance host') +DEFINE_string('glance_host', '$my_ip', 'glance host') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') -DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') +DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') +DEFINE_string('console_topic', 'console', + 'the topic console proxy nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on') DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') - +DEFINE_string('ajax_console_proxy_topic', 'ajax_proxy', + 'the topic ajax proxy nodes listen on') +DEFINE_string('ajax_console_proxy_url', + 'http://127.0.0.1:8000', + 'location of ajax console proxy, \ + in the form "http://127.0.0.1:8000"') +DEFINE_string('ajax_console_proxy_port', + 8000, 'port that ajax_console_proxy binds') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') DEFINE_bool('fake_network', False, @@ -235,11 +254,15 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('ec2_prefix', 'http', 'prefix for ec2') -DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') -DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server') -DEFINE_integer('cc_port', 8773, 'cloud controller port') -DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2') +DEFINE_string('ec2_host', '$my_ip', 'ip of api server') +DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server') +DEFINE_integer('ec2_port', 8773, 'cloud controller port') +DEFINE_string('ec2_scheme', 'http', 'prefix for ec2') +DEFINE_string('ec2_path', '/services/Cloud', 'suffix for ec2') +DEFINE_string('osapi_host', '$my_ip', 'ip of api 
server') +DEFINE_string('osapi_scheme', 'http', 'prefix for openstack') +DEFINE_integer('osapi_port', 8774, 'OpenStack API port') +DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack') DEFINE_string('default_project', 'openstack', 'default project for openstack') DEFINE_string('default_image', 'ami-11111', @@ -271,6 +294,8 @@ DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') +DEFINE_string('console_manager', 'nova.console.manager.ConsoleProxyManager', + 'Manager for console proxy') DEFINE_string('network_manager', 'nova.network.manager.VlanManager', 'Manager for network') DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', @@ -285,6 +310,5 @@ DEFINE_string('image_service', 'nova.image.s3.S3ImageService', DEFINE_string('host', socket.gethostname(), 'name of this node') -# UNUSED DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') diff --git a/nova/image/glance.py b/nova/image/glance.py index cb3936df1..593c4bce6 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -14,183 +14,50 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - """Implementation of an image service that uses Glance as the backend""" +from __future__ import absolute_import import httplib import json -import logging import urlparse -import webob.exc - -from nova import utils -from nova import flags from nova import exception -import nova.image.service - -FLAGS = flags.FLAGS - - -flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1', - 'IP address or URL where Glance\'s Teller service resides') -flags.DEFINE_string('glance_teller_port', '9191', - 'Port for Glance\'s Teller service') -flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1', - 'IP address or URL where Glance\'s Parallax service ' - 'resides') -flags.DEFINE_string('glance_parallax_port', '9292', - 'Port for Glance\'s Parallax service') - - -class TellerClient(object): +from nova import flags +from nova import log as logging +from nova import utils +from nova.image import service - def __init__(self): - self.address = FLAGS.glance_teller_address - self.port = FLAGS.glance_teller_port - url = urlparse.urlparse(self.address) - self.netloc = url.netloc - self.connection_type = {'http': httplib.HTTPConnection, - 'https': httplib.HTTPSConnection}[url.scheme] +LOG = logging.getLogger('nova.image.glance') -class ParallaxClient(object): +FLAGS = flags.FLAGS - def __init__(self): - self.address = FLAGS.glance_parallax_address - self.port = FLAGS.glance_parallax_port - url = urlparse.urlparse(self.address) - self.netloc = url.netloc - self.connection_type = {'http': httplib.HTTPConnection, - 'https': httplib.HTTPSConnection}[url.scheme] - - def get_image_index(self): - """ - Returns a list of image id/name mappings from Parallax - """ - try: - c = self.connection_type(self.netloc, self.port) - c.request("GET", "images") - res = c.getresponse() - if res.status == 200: - # Parallax returns a JSONified dict(images=image_list) - data = json.loads(res.read())['images'] - return data - else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images"), res.status_int) - return [] - finally: - c.close() - - def get_image_details(self): - """ - Returns a list of detailed image data mappings from Parallax - """ - try: - c = 
self.connection_type(self.netloc, self.port) - c.request("GET", "images/detail") - res = c.getresponse() - if res.status == 200: - # Parallax returns a JSONified dict(images=image_list) - data = json.loads(res.read())['images'] - return data - else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images/detail"), res.status_int) - return [] - finally: - c.close() - - def get_image_metadata(self, image_id): - """ - Returns a mapping of image metadata from Parallax - """ - try: - c = self.connection_type(self.netloc, self.port) - c.request("GET", "images/%s" % image_id) - res = c.getresponse() - if res.status == 200: - # Parallax returns a JSONified dict(image=image_info) - data = json.loads(res.read())['image'] - return data - else: - # TODO(jaypipes): log the error? - return None - finally: - c.close() - - def add_image_metadata(self, image_metadata): - """ - Tells parallax about an image's metadata - """ - try: - c = self.connection_type(self.netloc, self.port) - body = json.dumps(image_metadata) - c.request("POST", "images", body) - res = c.getresponse() - if res.status == 200: - # Parallax returns a JSONified dict(image=image_info) - data = json.loads(res.read())['image'] - return data['id'] - else: - # TODO(jaypipes): log the error? - return None - finally: - c.close() - - def update_image_metadata(self, image_id, image_metadata): - """ - Updates Parallax's information about an image - """ - try: - c = self.connection_type(self.netloc, self.port) - body = json.dumps(image_metadata) - c.request("PUT", "images/%s" % image_id, body) - res = c.getresponse() - return res.status == 200 - finally: - c.close() - - def delete_image_metadata(self, image_id): - """ - Deletes Parallax's information about an image - """ - try: - c = self.connection_type(self.netloc, self.port) - c.request("DELETE", "images/%s" % image_id) - res = c.getresponse() - return res.status == 200 - finally: - c.close() +GlanceClient = utils.import_class('glance.client.Client') -class GlanceImageService(nova.image.service.BaseImageService): +class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self): - self.teller = TellerClient() - self.parallax = ParallaxClient() + self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) def index(self, context): """ - Calls out to Parallax for a list of images available + Calls out to Glance for a list of images available """ - images = self.parallax.get_image_index() - return images + return self.client.get_images() def detail(self, context): """ - Calls out to Parallax for a list of detailed image information + Calls out to Glance for a list of detailed image information """ - images = self.parallax.get_image_details() - return images + return self.client.get_images_detailed() def show(self, context, id): """ Returns a dict containing image data for the given opaque image id. """ - image = self.parallax.get_image_metadata(id) + image = self.client.get_image_meta(id) if image: return image raise exception.NotFound @@ -202,7 +69,7 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises AlreadyExists if the image already exist. """ - return self.parallax.add_image_metadata(data) + return self.client.add_image(image_meta=data) def update(self, context, image_id, data): """Replace the contents of the given image with the new data. @@ -210,7 +77,7 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises NotFound if the image does not exist. 
""" - self.parallax.update_image_metadata(image_id, data) + return self.client.update_image(image_id, data) def delete(self, context, image_id): """ @@ -219,7 +86,7 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises NotFound if the image does not exist. """ - self.parallax.delete_image_metadata(image_id) + return self.client.delete_image(image_id) def delete_all(self): """ diff --git a/nova/log.py b/nova/log.py new file mode 100644 index 000000000..4997d3f28 --- /dev/null +++ b/nova/log.py @@ -0,0 +1,256 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. + +It also allows setting of formatting information through flags. +""" + + +import cStringIO +import json +import logging +import logging.handlers +import traceback + +from nova import flags +from nova import version + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('logging_context_format_string', + '(%(name)s %(nova_version)s): %(levelname)s ' + '[%(request_id)s %(user)s ' + '%(project)s] %(message)s', + 'format string to use for log messages') + +flags.DEFINE_string('logging_default_format_string', + '(%(name)s %(nova_version)s): %(levelname)s [N/A] ' + '%(message)s', + 'format string to use for log messages') + +flags.DEFINE_string('logging_debug_format_suffix', + 'from %(processName)s (pid=%(process)d) %(funcName)s' + ' %(pathname)s:%(lineno)d', + 'data to append to log format when level is DEBUG') + +flags.DEFINE_string('logging_exception_prefix', + '(%(name)s): TRACE: ', + 'prefix each line of exception output with this format') + +flags.DEFINE_list('default_log_levels', + ['amqplib=WARN', + 'sqlalchemy=WARN', + 'eventlet.wsgi.server=WARN'], + 'list of logger=LEVEL pairs') + +flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_string('logfile', None, 'output to named file') + + +# A list of things we want to replicate from logging. +# levels +CRITICAL = logging.CRITICAL +FATAL = logging.FATAL +ERROR = logging.ERROR +WARNING = logging.WARNING +WARN = logging.WARN +INFO = logging.INFO +DEBUG = logging.DEBUG +NOTSET = logging.NOTSET +# methods +getLogger = logging.getLogger +debug = logging.debug +info = logging.info +warning = logging.warning +warn = logging.warn +error = logging.error +exception = logging.exception +critical = logging.critical +log = logging.log +# handlers +StreamHandler = logging.StreamHandler +FileHandler = logging.FileHandler +# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. 
+SysLogHandler = logging.handlers.SysLogHandler
+
+
+# our new audit level
+AUDIT = logging.INFO + 1
+logging.addLevelName(AUDIT, 'AUDIT')
+
+
+def _dictify_context(context):
+    if context is None:
+        return None
+    if not isinstance(context, dict) \
+    and getattr(context, 'to_dict', None):
+        context = context.to_dict()
+    return context
+
+
+def basicConfig():
+    logging.basicConfig()
+    for handler in logging.root.handlers:
+        handler.setFormatter(_formatter)
+    if FLAGS.verbose:
+        logging.root.setLevel(logging.DEBUG)
+    else:
+        logging.root.setLevel(logging.INFO)
+    if FLAGS.use_syslog:
+        syslog = SysLogHandler(address='/dev/log')
+        syslog.setFormatter(_formatter)
+        logging.root.addHandler(syslog)
+    if FLAGS.logfile:
+        logfile = FileHandler(FLAGS.logfile)
+        logfile.setFormatter(_formatter)
+        logging.root.addHandler(logfile)
+
+
+class NovaLogger(logging.Logger):
+    """
+    NovaLogger manages request context and formatting.
+
+    This becomes the class that is instantiated by logging.getLogger.
+    """
+    def __init__(self, name, level=NOTSET):
+        level_name = self._get_level_from_flags(name, FLAGS)
+        level = globals()[level_name]
+        logging.Logger.__init__(self, name, level)
+
+    def _get_level_from_flags(self, name, FLAGS):
+        # if exactly "nova", or a child logger, honor the verbose flag
+        if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
+            return 'DEBUG'
+        for pair in FLAGS.default_log_levels:
+            logger, _sep, level = pair.partition('=')
+            # NOTE(todd): if we set a.b, we want a.b.c to have the same level
+            #             (but not a.bc, so we check the dot)
+            if name == logger:
+                return level
+            if name.startswith(logger) and name[len(logger)] == '.':
+                return level
+        return 'INFO'
+
+    def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
+        """Extract context from any log call"""
+        if not extra:
+            extra = {}
+        if context:
+            extra.update(_dictify_context(context))
+        extra.update({"nova_version": version.version_string_with_vcs()})
+        logging.Logger._log(self, level, msg, args, exc_info, extra)
+
+    def addHandler(self, handler):
+        """Each handler gets our custom formatter"""
+        handler.setFormatter(_formatter)
+        logging.Logger.addHandler(self, handler)
+
+    def audit(self, msg, *args, **kwargs):
+        """Shortcut for our AUDIT level"""
+        if self.isEnabledFor(AUDIT):
+            self._log(AUDIT, msg, args, **kwargs)
+
+    def exception(self, msg, *args, **kwargs):
+        """Logging.exception doesn't handle kwargs, so breaks context"""
+        if not kwargs.get('exc_info'):
+            kwargs['exc_info'] = 1
+        self.error(msg, *args, **kwargs)
+        # NOTE(todd): does this really go here, or in _log ?
+        extra = kwargs.get('extra')
+        if not extra:
+            return
+        env = extra.get('environment')
+        if env:
+            env = env.copy()
+            for k in env.keys():
+                if not isinstance(env[k], str):
+                    env.pop(k)
+            message = "Environment: %s" % json.dumps(env)
+            kwargs.pop('exc_info')
+            self.error(message, **kwargs)
+
+logging.setLoggerClass(NovaLogger)
+
+
+class NovaRootLogger(NovaLogger):
+    pass
+
+if not isinstance(logging.root, NovaRootLogger):
+    logging.root = NovaRootLogger("nova.root", WARNING)
+    NovaLogger.root = logging.root
+    NovaLogger.manager.root = logging.root
+
+
+class NovaFormatter(logging.Formatter):
+    """
+    A nova.context.RequestContext aware formatter configured through flags.
+
+    The flags used to set format strings are: logging_context_format_string
+    and logging_default_format_string.  You can also specify
+    logging_debug_format_suffix to append extra formatting if the log level is
+    debug.
+
+    For information about what variables are available for the formatter see:
+    http://docs.python.org/library/logging.html#formatter
+    """
+
+    def format(self, record):
+        """Uses context string if request_id is set, otherwise default"""
+        if record.__dict__.get('request_id', None):
+            self._fmt = FLAGS.logging_context_format_string
+        else:
+            self._fmt = FLAGS.logging_default_format_string
+        if record.levelno == logging.DEBUG \
+        and FLAGS.logging_debug_format_suffix:
+            self._fmt += " " + FLAGS.logging_debug_format_suffix
+        # Cache this on the record, Logger will respect our formatted copy
+        if record.exc_info:
+            record.exc_text = self.formatException(record.exc_info, record)
+        return logging.Formatter.format(self, record)
+
+    def formatException(self, exc_info, record=None):
+        """Format exception output with FLAGS.logging_exception_prefix"""
+        if not record:
+            return logging.Formatter.formatException(self, exc_info)
+        stringbuffer = cStringIO.StringIO()
+        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+                                  None, stringbuffer)
+        lines = stringbuffer.getvalue().split("\n")
+        stringbuffer.close()
+        formatted_lines = []
+        for line in lines:
+            pl = FLAGS.logging_exception_prefix % record.__dict__
+            fl = "%s%s" % (pl, line)
+            formatted_lines.append(fl)
+        return "\n".join(formatted_lines)
+
+_formatter = NovaFormatter()
+
+
+def audit(msg, *args, **kwargs):
+    """Shortcut for logging to root log with severity 'AUDIT'."""
+    if len(logging.root.handlers) == 0:
+        basicConfig()
+    logging.root.log(AUDIT, msg, *args, **kwargs)
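Taken together, the module gives callers per-request fields in log lines plus the new AUDIT level. A minimal usage sketch, assuming a RequestContext is at hand; the logger name and message values are illustrative, and the tree's own code additionally wraps messages in _() for i18n, which plain strings here omit to stay self-contained:

    from nova import context
    from nova import log as logging

    LOG = logging.getLogger('nova.example')   # hypothetical logger name

    ctxt = context.RequestContext('fake_user', 'fake_project')

    # With a context, request_id/user/project fill the
    # logging_context_format_string; without one, the default is used.
    LOG.info('instance %s: starting', 'i-00000001', context=ctxt)

    # AUDIT sits just above INFO and gets its own shortcut method.
    LOG.audit('instance %s: created', 'i-00000001', context=ctxt)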
diff --git a/nova/network/api.py b/nova/network/api.py
index cbd912047..bf43acb51 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -20,15 +20,15 @@
 Handles all requests relating to instances (guest vms).
 """
 
-import logging
-
 from nova import db
 from nova import flags
+from nova import log as logging
 from nova import quota
 from nova import rpc
 from nova.db import base
 
 FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.network')
 
 
 class API(base.Base):
@@ -36,7 +36,7 @@ class API(base.Base):
 
     def allocate_floating_ip(self, context):
         if quota.allowed_floating_ips(context, 1) < 1:
-            logging.warn(_("Quota exceeeded for %s, tried to allocate "
+            LOG.warn(_("Quota exceeded for %s, tried to allocate "
                        "address"), context.project_id)
             raise quota.QuotaError(_("Address quota exceeded. You cannot "
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 931a89554..d29e17603 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -17,16 +17,17 @@
 Implements vlans, bridges, and iptables rules using linux utilities.
 """
 
-import logging
 import os
 
-# TODO(ja): does the definition of network_path belong here?
-
 from nova import db
 from nova import flags
+from nova import log as logging
 from nova import utils
 
 
+LOG = logging.getLogger("nova.linux_net")
+
+
 def _bin_file(script):
     """Return the absolute path to script in the bin directory"""
     return os.path.abspath(os.path.join(__file__, "../../../bin", script))
@@ -45,10 +46,11 @@
 flags.DEFINE_string('vlan_interface', 'eth0',
                     'network device for vlans')
 flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
                     'location of nova-dhcpbridge')
-flags.DEFINE_string('routing_source_ip', utils.get_my_ip(),
+flags.DEFINE_string('routing_source_ip', '$my_ip',
                     'Public IP of network host')
 flags.DEFINE_bool('use_nova_chains', False,
                   'use the nova_ routing chains instead of default')
+
 flags.DEFINE_string('dns_server', None,
                     'if set, uses specific dns server for dnsmasq')
 flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
@@ -59,7 +61,7 @@
 def metadata_forward():
     """Create forwarding rule for metadata"""
     _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
              "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
-             "--to-destination %s:%s" % (FLAGS.cc_dmz, FLAGS.cc_port))
+             "--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
 
 
 def init_host():
@@ -172,7 +174,7 @@
     """Create a vlan unless it already exists"""
     interface = "vlan%s" % vlan_num
     if not _device_exists(interface):
-        logging.debug(_("Starting VLAN inteface %s"), interface)
+        LOG.debug(_("Starting VLAN interface %s"), interface)
         _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
         _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
         _execute("sudo ifconfig %s up" % interface)
@@ -182,7 +184,7 @@
 def ensure_bridge(bridge, interface, net_attrs=None):
     """Create a bridge unless it already exists"""
     if not _device_exists(bridge):
-        logging.debug(_("Starting Bridge interface for %s"), interface)
+        LOG.debug(_("Starting Bridge interface for %s"), interface)
         _execute("sudo brctl addbr %s" % bridge)
         _execute("sudo brctl setfd %s 0" % bridge)
         # _execute("sudo brctl setageing %s 10" % bridge)
@@ -195,6 +197,10 @@
                                            net_attrs['gateway'],
                                            net_attrs['broadcast'],
                                            net_attrs['netmask']))
+        if(FLAGS.use_ipv6):
+            _execute("sudo ifconfig %s add %s up" % \
+                     (bridge,
+                      net_attrs['cidr_v6']))
     else:
         _execute("sudo ifconfig %s up" % bridge)
     if FLAGS.use_nova_chains:
@@ -208,6 +214,8 @@
     _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
     _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
 
+    _execute("sudo iptables -N nova-local", check_exit_code=False)
+    _confirm_rule("FORWARD", "-j nova-local")
 
 
 def get_dhcp_hosts(context, network_id):
@@ -248,9 +256,9 @@
                 _execute('sudo kill -HUP %d' % pid)
                 return
             except Exception as exc:  # pylint: disable-msg=W0703
-                logging.debug(_("Hupping dnsmasq threw %s"), exc)
+                LOG.debug(_("Hupping dnsmasq threw %s"), exc)
         else:
-            logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
+            LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
 
     # FLAGFILE and DNSMASQ_INTERFACE in env
     env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -259,6 +267,50 @@
     _execute(command, addl_env=env)
 
 
+def update_ra(context, network_id):
+    network_ref = db.network_get(context, network_id)
+
+    conffile = _ra_file(network_ref['bridge'], 'conf')
+    with open(conffile, 'w') as f:
+        conf_str = """
+interface %s
+{
+   AdvSendAdvert
on; + MinRtrAdvInterval 3; + MaxRtrAdvInterval 10; + prefix %s + { + AdvOnLink on; + AdvAutonomous on; + }; +}; +""" % (network_ref['bridge'], network_ref['cidr_v6']) + f.write(conf_str) + + # Make sure radvd can actually read it (it setuid()s to "nobody") + os.chmod(conffile, 0644) + + pid = _ra_pid_for(network_ref['bridge']) + + # if radvd is already running, then tell it to reload + if pid: + out, _err = _execute('cat /proc/%d/cmdline' + % pid, check_exit_code=False) + if conffile in out: + try: + _execute('sudo kill -HUP %d' % pid) + return + except Exception as exc: # pylint: disable-msg=W0703 + LOG.debug(_("Hupping radvd threw %s"), exc) + else: + LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) + command = _ra_cmd(network_ref) + _execute(command) + db.network_update(context, network_id, + {"ra_server": + utils.get_my_linklocal(network_ref['bridge'])}) + + def _host_dhcp(fixed_ip_ref): """Return a host string for an address""" instance_ref = fixed_ip_ref['instance'] @@ -270,7 +322,7 @@ def _host_dhcp(fixed_ip_ref): def _execute(cmd, *args, **kwargs): """Wrapper around utils._execute for fake_network""" if FLAGS.fake_network: - logging.debug("FAKE NET: %s", cmd) + LOG.debug("FAKE NET: %s", cmd) return "fake", 0 else: return utils.execute(cmd, *args, **kwargs) @@ -320,6 +372,15 @@ def _dnsmasq_cmd(net): return ''.join(cmd) +def _ra_cmd(net): + """Builds radvd command""" + cmd = ['sudo -E radvd', +# ' -u nobody', + ' -C %s' % _ra_file(net['bridge'], 'conf'), + ' -p %s' % _ra_file(net['bridge'], 'pid')] + return ''.join(cmd) + + def _stop_dnsmasq(network): """Stops the dnsmasq instance for a given network""" pid = _dnsmasq_pid_for(network) @@ -328,7 +389,7 @@ def _stop_dnsmasq(network): try: _execute('sudo kill -TERM %d' % pid) except Exception as exc: # pylint: disable-msg=W0703 - logging.debug(_("Killing dnsmasq threw %s"), exc) + LOG.debug(_("Killing dnsmasq threw %s"), exc) def _dhcp_file(bridge, kind): @@ -341,6 +402,16 @@ def _dhcp_file(bridge, kind): kind)) +def _ra_file(bridge, kind): + """Return path to a pid or conf file for a bridge""" + + if not os.path.exists(FLAGS.networks_path): + os.makedirs(FLAGS.networks_path) + return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path, + bridge, + kind)) + + def _dnsmasq_pid_for(bridge): """Returns the pid for prior dnsmasq instance for a bridge @@ -354,3 +425,18 @@ def _dnsmasq_pid_for(bridge): if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) + + +def _ra_pid_for(bridge): + """Returns the pid for prior radvd instance for a bridge + + Returns None if no pid file exists + + If machine has rebooted pid might be incorrect (caller should check) + """ + + pid_file = _ra_file(bridge, 'pid') + + if os.path.exists(pid_file): + with open(pid_file, 'r') as f: + return int(f.read()) diff --git a/nova/network/manager.py b/nova/network/manager.py index 16aa8f895..61de8055a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -45,7 +45,6 @@ topologies. 
All of the network commands are issued to a subclass of """ import datetime -import logging import math import socket @@ -55,11 +54,13 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils from nova import rpc +LOG = logging.getLogger("nova.network.manager") FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') @@ -73,7 +74,7 @@ flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') -flags.DEFINE_string('vpn_ip', utils.get_my_ip(), +flags.DEFINE_string('vpn_ip', '$my_ip', 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') flags.DEFINE_integer('network_size', 256, @@ -81,6 +82,7 @@ flags.DEFINE_integer('network_size', 256, flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block') flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block') +flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block') flags.DEFINE_integer('cnt_vpn_clients', 5, 'Number of addresses reserved for vpn clients') flags.DEFINE_string('network_driver', 'nova.network.linux_net', @@ -89,6 +91,9 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False, 'Whether to update dhcp when fixed_ip is disassociated') flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600, 'Seconds after which a deallocated ip is disassociated') + +flags.DEFINE_bool('use_ipv6', False, + 'use the ipv6') flags.DEFINE_string('network_host', socket.gethostname(), 'Network host to use for ip allocation in flat modes') flags.DEFINE_bool('fake_call', False, @@ -131,7 +136,7 @@ class NetworkManager(manager.Manager): def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - logging.debug(_("setting network host")) + LOG.debug(_("setting network host"), context=context) host = self.db.network_set_host(context, network_id, self.host) @@ -186,7 +191,7 @@ class NetworkManager(manager.Manager): def lease_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is leased.""" - logging.debug("Leasing IP %s", address) + LOG.debug(_("Leasing IP %s"), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -201,12 +206,12 @@ class NetworkManager(manager.Manager): {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - logging.warn(_("IP %s leased that was already deallocated"), - address) + LOG.warn(_("IP %s leased that was already deallocated"), address, + context=context) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - logging.debug("Releasing IP %s", address) + LOG.debug("Releasing IP %s", address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -216,7 +221,8 @@ class NetworkManager(manager.Manager): raise exception.Error(_("IP %s released from bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) if not fixed_ip_ref['leased']: - logging.warn(_("IP %s released that was not leased"), address) + LOG.warn(_("IP %s released that was not leased"), address, 
+                 context=context)
         self.db.fixed_ip_update(context,
                                 fixed_ip_ref['address'],
                                 {'leased': False})
@@ -233,8 +239,8 @@
         """Get the network host for the current context."""
         raise NotImplementedError()
 
-    def create_networks(self, context, num_networks, network_size,
-                        *args, **kwargs):
+    def create_networks(self, context, cidr, num_networks, network_size,
+                        cidr_v6, *args, **kwargs):
         """Create networks based on parameters."""
         raise NotImplementedError()
 
@@ -319,9 +325,11 @@
         pass
 
     def create_networks(self, context, cidr, num_networks, network_size,
-                        *args, **kwargs):
+                        cidr_v6, *args, **kwargs):
         """Create networks based on parameters."""
         fixed_net = IPy.IP(cidr)
+        fixed_net_v6 = IPy.IP(cidr_v6)
+        significant_bits_v6 = 64
         for index in range(num_networks):
             start = index * network_size
             significant_bits = 32 - int(math.log(network_size, 2))
@@ -334,7 +342,13 @@
             net['gateway'] = str(project_net[1])
             net['broadcast'] = str(project_net.broadcast())
             net['dhcp_start'] = str(project_net[2])
+
+            if(FLAGS.use_ipv6):
+                cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
+                net['cidr_v6'] = cidr_v6
+
             network_ref = self.db.network_create_safe(context, net)
+
             if network_ref:
                 self._create_fixed_ips(context, network_ref['id'])
 
@@ -437,7 +451,7 @@
                                                          self.host,
                                                          time)
         if num:
-            logging.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+            LOG.debug(_("Disassociated %s stale fixed ip(s)"), num)
 
     def init_host(self):
         """Do any initialization that needs to be run if this is a
@@ -480,12 +494,16 @@
                                         network_ref['bridge'])
 
     def create_networks(self, context, cidr, num_networks, network_size,
-                        vlan_start, vpn_start):
+                        vlan_start, vpn_start, cidr_v6):
         """Create networks based on parameters."""
         fixed_net = IPy.IP(cidr)
+        fixed_net_v6 = IPy.IP(cidr_v6)
+        network_size_v6 = 1 << 64
+        significant_bits_v6 = 64
         for index in range(num_networks):
             vlan = vlan_start + index
             start = index * network_size
+            start_v6 = index * network_size_v6
             significant_bits = 32 - int(math.log(network_size, 2))
             cidr = "%s/%s" % (fixed_net[start], significant_bits)
             project_net = IPy.IP(cidr)
@@ -498,6 +516,11 @@
             net['dhcp_start'] = str(project_net[3])
             net['vlan'] = vlan
             net['bridge'] = 'br%s' % vlan
+            if(FLAGS.use_ipv6):
+                cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
+                                     significant_bits_v6)
+                net['cidr_v6'] = cidr_v6
+
             # NOTE(vish): This makes ports unique across the cloud, a more
             #             robust solution would be to make them unique per ip
             net['vpn_public_port'] = vpn_start + index
@@ -536,6 +559,7 @@
             self.driver.ensure_vlan_bridge(network_ref['vlan'],
                                            network_ref['bridge'],
                                            network_ref)
+
         # NOTE(vish): only ensure this forward if the address hasn't been set
         # manually.
if address == FLAGS.vpn_ip: @@ -544,6 +568,8 @@ class VlanManager(NetworkManager): network_ref['vpn_private_address']) if not FLAGS.fake_network: self.driver.update_dhcp(context, network_id) + if(FLAGS.use_ipv6): + self.driver.update_ra(context, network_id) @property def _bottom_reserved_ips(self): diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 52257f69f..bc26fd3c5 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -39,7 +39,6 @@ S3 client with this module:: import datetime import json -import logging import multiprocessing import os import urllib @@ -54,12 +53,14 @@ from twisted.web import static from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.objectstore import bucket from nova.objectstore import image +LOG = logging.getLogger('nova.objectstore.handler') FLAGS = flags.FLAGS flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.') @@ -132,9 +133,11 @@ def get_context(request): request.uri, headers=request.getAllHeaders(), check_type='s3') - return context.RequestContext(user, project) + rv = context.RequestContext(user, project) + LOG.audit(_("Authenticated request"), context=rv) + return rv except exception.Error as ex: - logging.debug(_("Authentication Failure: %s"), ex) + LOG.debug(_("Authentication Failure: %s"), ex) raise exception.NotAuthorized() @@ -176,7 +179,7 @@ class S3(ErrorHandlingResource): def render_GET(self, request): # pylint: disable-msg=R0201 """Renders the GET request for a list of buckets as XML""" - logging.debug('List of buckets requested') + LOG.debug(_('List of buckets requested'), context=request.context) buckets = [b for b in bucket.Bucket.all() \ if b.is_authorized(request.context)] @@ -203,7 +206,7 @@ class BucketResource(ErrorHandlingResource): def render_GET(self, request): "Returns the keys for the bucket resource""" - logging.debug("List keys for bucket %s", self.name) + LOG.debug(_("List keys for bucket %s"), self.name) try: bucket_object = bucket.Bucket(self.name) @@ -211,6 +214,8 @@ class BucketResource(ErrorHandlingResource): return error.NoResource(message="No such bucket").render(request) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to access bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() prefix = get_argument(request, "prefix", u"") @@ -227,8 +232,8 @@ class BucketResource(ErrorHandlingResource): def render_PUT(self, request): "Creates the bucket resource""" - logging.debug(_("Creating bucket %s"), self.name) - logging.debug("calling bucket.Bucket.create(%r, %r)", + LOG.debug(_("Creating bucket %s"), self.name) + LOG.debug("calling bucket.Bucket.create(%r, %r)", self.name, request.context) bucket.Bucket.create(self.name, request.context) @@ -237,10 +242,12 @@ class BucketResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the bucket resource""" - logging.debug(_("Deleting bucket %s"), self.name) + LOG.debug(_("Deleting bucket %s"), self.name) bucket_object = bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to delete bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() bucket_object.delete() @@ -261,11 +268,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. 
""" - logging.debug(_("Getting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to get object %s from bucket " + "%s"), self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() obj = self.bucket[urllib.unquote(self.name)] @@ -281,11 +289,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug(_("Putting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to upload object %s to bucket " + "%s"), + self.name, self.bucket.name, context=request.context) raise exception.NotAuthorized() key = urllib.unquote(self.name) @@ -302,11 +311,13 @@ class ObjectResource(ErrorHandlingResource): authorized to delete the object. """ - logging.debug(_("Deleting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name, + context=request.context) if not self.bucket.is_authorized(request.context): + LOG.audit("Unauthorized attempt to delete object %s from " + "bucket %s", self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() del self.bucket[urllib.unquote(self.name)] @@ -379,13 +390,21 @@ class ImagesResource(resource.Resource): image_path = os.path.join(FLAGS.images_path, image_id) if not image_path.startswith(FLAGS.images_path) or \ os.path.exists(image_path): + LOG.audit(_("Not authorized to upload image: invalid directory " + "%s"), + image_path, context=request.context) raise exception.NotAuthorized() bucket_object = bucket.Bucket(image_location.split("/")[0]) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Not authorized to upload image: unauthorized " + "bucket %s"), bucket_object.name, + context=request.context) raise exception.NotAuthorized() + LOG.audit(_("Starting image upload: %s"), image_id, + context=request.context) p = multiprocessing.Process(target=image.Image.register_aws_image, args=(image_id, image_location, request.context)) p.start() @@ -398,17 +417,21 @@ class ImagesResource(resource.Resource): image_id = get_argument(request, 'image_id', u'') image_object = image.Image(image_id) if not image_object.is_authorized(request.context): - logging.debug(_("not authorized for render_POST in images")) + LOG.audit(_("Not authorized to update attributes of image %s"), + image_id, context=request.context) raise exception.NotAuthorized() operation = get_argument(request, 'operation', u'') if operation: # operation implies publicity toggle - logging.debug(_("handling publicity toggle")) - image_object.set_public(operation == 'add') + newstatus = (operation == 'add') + LOG.audit(_("Toggling publicity flag of image %s %r"), image_id, + newstatus, context=request.context) + image_object.set_public(newstatus) else: # other attributes imply update - logging.debug(_("update user fields")) + LOG.audit(_("Updating user fields on image %s"), image_id, + context=request.context) clean_args = {} for arg in request.args.keys(): clean_args[arg] = request.args[arg][0] @@ -421,9 +444,12 @@ class ImagesResource(resource.Resource): image_object = image.Image(image_id) if not image_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to 
delete image %s"), + image_id, context=request.context) raise exception.NotAuthorized() image_object.delete() + LOG.audit(_("Deleted image: %s"), image_id, context=request.context) request.setResponseCode(204) return '' diff --git a/nova/rpc.py b/nova/rpc.py index 844088348..49b11602b 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -22,7 +22,6 @@ No fan-out support yet. """ import json -import logging import sys import time import traceback @@ -36,13 +35,12 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags +from nova import log as logging from nova import utils FLAGS = flags.FLAGS - -LOG = logging.getLogger('amqplib') -LOG.setLevel(logging.DEBUG) +LOG = logging.getLogger('nova.rpc') class Connection(carrot_connection.BrokerConnection): @@ -91,15 +89,16 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception(_("AMQP server on %s:%d is unreachable." - " Trying again in %d seconds.") % ( - FLAGS.rabbit_host, - FLAGS.rabbit_port, - FLAGS.rabbit_retry_interval)) + LOG.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( + FLAGS.rabbit_host, + FLAGS.rabbit_port, + FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception(_("Unable to connect to AMQP server" - " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) + LOG.exception(_("Unable to connect to AMQP server " + "after %d tries. Shutting down."), + FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +115,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error(_("Reconnected to queue")) + LOG.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. 
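The reconnect handling in Consumer above logs one message per outage rather than one per poll: failed_connection latches on the first error, triggers a re-declare on the next attempt, and gates the "Reconnected to queue" message. A condensed sketch of that latch pattern, with stdlib logging and a hypothetical queue interface:

import logging

LOG = logging.getLogger(__name__)

class PollingConsumer(object):
    def __init__(self, queue):
        self.queue = queue                  # hypothetical AMQP queue wrapper
        self.failed_connection = False

    def fetch(self):
        try:
            if self.failed_connection:
                self.queue.redeclare()      # re-attach after an outage
            self.queue.poll_once()
            if self.failed_connection:
                LOG.error("Reconnected to queue")
                self.failed_connection = False
        except Exception:
            # Log only the first failure; stay quiet until the connection
            # returns, so a dead broker is one traceback, not one per poll.
            if not self.failed_connection:
                LOG.exception("Failed to fetch message from queue")
                self.failed_connection = True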
except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception(_("Failed to fetch message from queue")) + LOG.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -193,6 +192,7 @@ class AdapterConsumer(TopicConsumer): if msg_id: msg_reply(msg_id, rval, None) except Exception as e: + logging.exception("Exception during message handling") if msg_id: msg_reply(msg_id, None, sys.exc_info()) return @@ -242,8 +242,8 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) - logging.error(_("Returning exception %s to caller"), message) - logging.error(tb) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) conn = Connection.instance(True) publisher = DirectPublisher(connection=conn, msg_id=msg_id) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 44e21f2fd..a4d6dd574 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -21,15 +21,16 @@ Scheduler Service """ -import logging import functools from nova import db from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils +LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS flags.DEFINE_string('scheduler_driver', 'nova.scheduler.chance.ChanceScheduler', @@ -65,4 +66,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - logging.debug(_("Casting to %s %s for %s"), topic, host, method) + LOG.debug(_("Casting to %s %s for %s"), topic, host, method) diff --git a/nova/scheduler/zone.py b/nova/scheduler/zone.py new file mode 100644 index 000000000..49786cd32 --- /dev/null +++ b/nova/scheduler/zone.py @@ -0,0 +1,56 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Availability Zone Scheduler implementation +""" + +import random + +from nova.scheduler import driver +from nova import db + + +class ZoneScheduler(driver.Scheduler): + """Implements Scheduler as a random node selector.""" + + def hosts_up_with_zone(self, context, topic, zone): + """Return the list of hosts that have a running service + for topic and availability zone (if defined). + """ + + if zone is None: + return self.hosts_up(context, topic) + + services = db.service_get_all_by_topic(context, topic) + return [service.host + for service in services + if self.service_is_up(service) + and service.availability_zone == zone] + + def schedule(self, context, topic, *_args, **_kwargs): + """Picks a host that is up at random in selected + availability zone (if defined). 
+ """ + + zone = _kwargs.get('availability_zone') + hosts = self.hosts_up_with_zone(context, topic, zone) + if not hosts: + raise driver.NoValidHost(_("No hosts found")) + return hosts[int(random.random() * len(hosts))] diff --git a/nova/service.py b/nova/service.py index 7203430c6..efc08fd63 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,7 +21,6 @@ Generic Node baseclass for all workers that run on hosts """ import inspect -import logging import os import sys import time @@ -35,10 +34,10 @@ from sqlalchemy.exc import OperationalError from nova import context from nova import db from nova import exception +from nova import log as logging from nova import flags from nova import rpc from nova import utils -from nova.db.sqlalchemy import models FLAGS = flags.FLAGS @@ -114,11 +113,13 @@ class Service(object): self.timers.append(periodic) def _create_service_ref(self, context): + zone = FLAGS.node_availability_zone service_ref = db.service_create(context, {'host': self.host, 'binary': self.binary, 'topic': self.topic, - 'report_count': 0}) + 'report_count': 0, + 'availability_zone': zone}) self.service_id = service_ref['id'] def __getattr__(self, key): @@ -155,7 +156,7 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.warn(_("Starting %s node"), topic) + logging.audit(_("Starting %s node"), topic) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -208,30 +209,16 @@ class Service(object): self.model_disconnected = True logging.exception(_("model server went away")) - try: - models.register_models() - except OperationalError: - logging.exception(_("Data store is unreachable." - " Trying again in %d seconds.") % - FLAGS.sql_retry_interval) - time.sleep(FLAGS.sql_retry_interval) - def serve(*services): - argv = FLAGS(sys.argv) + FLAGS(sys.argv) + logging.basicConfig() if not services: services = [Service.create()] name = '_'.join(x.binary for x in services) - logging.debug("Serving %s" % name) - - logging.getLogger('amqplib').setLevel(logging.WARN) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) + logging.debug(_("Serving %s"), name) logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: diff --git a/nova/test.py b/nova/test.py index db5826c04..881baccd5 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,14 +23,10 @@ and some black magic for inline callbacks. """ import datetime -import sys -import time import unittest import mox import stubout -from twisted.internet import defer -from twisted.trial import unittest as trial_unittest from nova import context from nova import db @@ -74,7 +70,8 @@ class TestCase(unittest.TestCase): FLAGS.fixed_range, 5, 16, FLAGS.vlan_start, - FLAGS.vpn_start) + FLAGS.vpn_start, + FLAGS.fixed_range_v6) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators @@ -139,95 +136,3 @@ class TestCase(unittest.TestCase): _wrapped.func_name = self.originalAttach.func_name rpc.Consumer.attach_to_eventlet = _wrapped - - -class TrialTestCase(trial_unittest.TestCase): - """Test case base class for all unit tests""" - def setUp(self): - """Run before each test method to initialize test environment""" - super(TrialTestCase, self).setUp() - # NOTE(vish): We need a better method for creating fixtures for tests - # now that we have some required db setup for the system - # to work properly. 
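The availability-zone support above has two halves: service.py now records FLAGS.node_availability_zone in each service's database row, and ZoneScheduler filters live services on that column before picking a host at random; the index expression hosts[int(random.random() * len(hosts))] is equivalent to random.choice(hosts). A self-contained sketch of the selection half (fake in-memory services rather than the db API):

import random

class FakeService(object):
    def __init__(self, host, zone, alive=True):
        self.host = host
        self.availability_zone = zone
        self.alive = alive

def pick_host(services, zone=None):
    # Mirror hosts_up_with_zone: zone=None means any host that is up.
    hosts = [s.host for s in services
             if s.alive and (zone is None or s.availability_zone == zone)]
    if not hosts:
        raise Exception("No hosts found")
    return random.choice(hosts)   # same distribution as the index arithmetic

services = [FakeService('host1', 'zone1'), FakeService('host2', 'zone2')]
assert pick_host(services, zone='zone2') == 'host2'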
- self.start = datetime.datetime.utcnow() - ctxt = context.get_admin_context() - if db.network_count(ctxt) != 5: - network_manager.VlanManager().create_networks(ctxt, - FLAGS.fixed_range, - 5, 16, - FLAGS.vlan_start, - FLAGS.vpn_start) - - # emulate some of the mox stuff, we can't use the metaclass - # because it screws with our generators - self.mox = mox.Mox() - self.stubs = stubout.StubOutForTesting() - self.flag_overrides = {} - self.injected = [] - self._original_flags = FLAGS.FlagValuesDict() - - def tearDown(self): - """Runs after each test method to finalize/tear down test - environment.""" - try: - self.mox.UnsetStubs() - self.stubs.UnsetAll() - self.stubs.SmartUnsetAll() - self.mox.VerifyAll() - # NOTE(vish): Clean up any ips associated during the test. - ctxt = context.get_admin_context() - db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, - self.start) - db.network_disassociate_all(ctxt) - for x in self.injected: - try: - x.stop() - except AssertionError: - pass - - if FLAGS.fake_rabbit: - fakerabbit.reset_all() - - db.security_group_destroy_all(ctxt) - super(TrialTestCase, self).tearDown() - finally: - self.reset_flags() - - def flags(self, **kw): - """Override flag variables for a test""" - for k, v in kw.iteritems(): - if k in self.flag_overrides: - self.reset_flags() - raise Exception( - 'trying to override already overriden flag: %s' % k) - self.flag_overrides[k] = getattr(FLAGS, k) - setattr(FLAGS, k, v) - - def reset_flags(self): - """Resets all flag variables for the test. Runs after each test""" - FLAGS.Reset() - for k, v in self._original_flags.iteritems(): - setattr(FLAGS, k, v) - - def run(self, result=None): - test_method = getattr(self, self._testMethodName) - setattr(self, - self._testMethodName, - self._maybeInlineCallbacks(test_method, result)) - rv = super(TrialTestCase, self).run(result) - setattr(self, self._testMethodName, test_method) - return rv - - def _maybeInlineCallbacks(self, func, result): - def _wrapped(): - g = func() - if isinstance(g, defer.Deferred): - return g - if not hasattr(g, 'send'): - return defer.succeed(g) - - inlined = defer.inlineCallbacks(func) - d = inlined() - return d - _wrapped.func_name = func.func_name - return _wrapped diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 8dc87d0e2..592d5bea9 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -34,3 +34,8 @@ # The code below enables nosetests to work with i18n _() blocks import __builtin__ setattr(__builtin__, '_', lambda x: x) + + +def setup(): + from nova.db import migration + migration.db_sync() diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 9e183bd0d..14eaaa62c 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -15,23 +15,28 @@ # License for the specific language governing permissions and limitations # under the License. 
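Several rewritten tests below stand a trivial wsgify'd function in for the old APIStub. webob.dec.wsgify wraps a function that takes a webob.Request and returns a string (or Response) into a standard WSGI callable, so middleware such as RateLimitingMiddleware can wrap it directly. For instance:

import webob
import webob.dec

@webob.dec.wsgify
def simple_wsgi(req):
    # req is a webob.Request; a returned str becomes the 200 response body
    return "hello from %s" % req.path_info

# The decorated function is a plain WSGI application:
resp = webob.Request.blank('/servers').get_response(simple_wsgi)
assert resp.status_int == 200
assert resp.body == "hello from /servers"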
+import webob.dec import unittest from nova import context from nova import flags from nova.api.openstack.ratelimiting import RateLimitingMiddleware from nova.api.openstack.common import limited -from nova.tests.api.fakes import APIStub -from nova import utils +from nova.tests.api.openstack import fakes from webob import Request FLAGS = flags.FLAGS +@webob.dec.wsgify +def simple_wsgi(req): + return "" + + class RateLimitingMiddlewareTest(unittest.TestCase): def test_get_action_name(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) def verify(method, url, action_name): req = Request.blank(url) @@ -61,19 +66,19 @@ class RateLimitingMiddlewareTest(unittest.TestCase): self.assertTrue('Retry-After' in resp.headers) def test_single_action(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) self.exhaust(middleware, 'DELETE', '/servers/4', 'usr1', 100) self.exhaust(middleware, 'DELETE', '/servers/4', 'usr2', 100) def test_POST_servers_action_implies_POST_action(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10) self.exhaust(middleware, 'POST', '/images/4', 'usr2', 10) self.assertTrue(set(middleware.limiter._levels) == \ set(['usr1:POST', 'usr1:POST servers', 'usr2:POST'])) def test_POST_servers_action_correctly_ratelimited(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) # Use up all of our "POST" allowance for the minute, 5 times for i in range(5): self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10) @@ -83,9 +88,9 @@ class RateLimitingMiddlewareTest(unittest.TestCase): self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 0) def test_proxy_ctor_works(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) self.assertEqual(middleware.limiter.__class__.__name__, "Limiter") - middleware = RateLimitingMiddleware(APIStub(), service_host='foobar') + middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar') self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy") diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 291a0e468..fb282f1c9 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -22,6 +22,9 @@ import string import webob import webob.dec +from paste import urlmap + +from glance import client as glance_client from nova import auth from nova import context @@ -29,6 +32,7 @@ from nova import exception as exc from nova import flags from nova import utils import nova.api.openstack.auth +from nova.api import openstack from nova.api.openstack import auth from nova.api.openstack import ratelimiting from nova.image import glance @@ -69,6 +73,17 @@ def fake_wsgi(self, req): return self.application +def wsgi_app(inner_application=None): + if not inner_application: + inner_application = openstack.APIRouter() + mapper = urlmap.URLMap() + api = openstack.FaultWrapper(auth.AuthMiddleware( + ratelimiting.RateLimitingMiddleware(inner_application))) + mapper['/v1.0'] = api + mapper['/'] = openstack.FaultWrapper(openstack.Versions()) + return mapper + + def stub_out_key_pair_funcs(stubs): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] @@ -107,7 +122,7 @@ def stub_out_rate_limiting(stubs): def stub_out_networking(stubs): def get_my_ip(): 
return '127.0.0.1' - stubs.Set(nova.utils, 'get_my_ip', get_my_ip) + stubs.Set(nova.flags, '_get_my_ip', get_my_ip) def stub_out_compute_api_snapshot(stubs): @@ -116,64 +131,60 @@ def stub_out_compute_api_snapshot(stubs): stubs.Set(nova.compute.API, 'snapshot', snapshot) -def stub_out_glance(stubs, initial_fixtures=[]): +def stub_out_glance(stubs, initial_fixtures=None): - class FakeParallaxClient: + class FakeGlanceClient: def __init__(self, initial_fixtures): - self.fixtures = initial_fixtures + self.fixtures = initial_fixtures or [] - def fake_get_image_index(self): + def fake_get_images(self): return [dict(id=f['id'], name=f['name']) for f in self.fixtures] - def fake_get_image_details(self): + def fake_get_images_detailed(self): return self.fixtures - def fake_get_image_metadata(self, image_id): + def fake_get_image_meta(self, image_id): for f in self.fixtures: if f['id'] == image_id: return f return None - def fake_add_image_metadata(self, image_data): + def fake_add_image(self, image_meta): id = ''.join(random.choice(string.letters) for _ in range(20)) - image_data['id'] = id - self.fixtures.append(image_data) + image_meta['id'] = id + self.fixtures.append(image_meta) return id - def fake_update_image_metadata(self, image_id, image_data): - f = self.fake_get_image_metadata(image_id) + def fake_update_image(self, image_id, image_meta): + f = self.fake_get_image_meta(image_id) if not f: raise exc.NotFound - f.update(image_data) + f.update(image_meta) - def fake_delete_image_metadata(self, image_id): - f = self.fake_get_image_metadata(image_id) + def fake_delete_image(self, image_id): + f = self.fake_get_image_meta(image_id) if not f: raise exc.NotFound self.fixtures.remove(f) - def fake_delete_all(self): - self.fixtures = [] - - fake_parallax_client = FakeParallaxClient(initial_fixtures) - stubs.Set(nova.image.glance.ParallaxClient, 'get_image_index', - fake_parallax_client.fake_get_image_index) - stubs.Set(nova.image.glance.ParallaxClient, 'get_image_details', - fake_parallax_client.fake_get_image_details) - stubs.Set(nova.image.glance.ParallaxClient, 'get_image_metadata', - fake_parallax_client.fake_get_image_metadata) - stubs.Set(nova.image.glance.ParallaxClient, 'add_image_metadata', - fake_parallax_client.fake_add_image_metadata) - stubs.Set(nova.image.glance.ParallaxClient, 'update_image_metadata', - fake_parallax_client.fake_update_image_metadata) - stubs.Set(nova.image.glance.ParallaxClient, 'delete_image_metadata', - fake_parallax_client.fake_delete_image_metadata) - stubs.Set(nova.image.glance.GlanceImageService, 'delete_all', - fake_parallax_client.fake_delete_all) + ##def fake_delete_all(self): + ## self.fixtures = [] + + GlanceClient = glance_client.Client + fake = FakeGlanceClient(initial_fixtures) + + stubs.Set(GlanceClient, 'get_images', fake.fake_get_images) + stubs.Set(GlanceClient, 'get_images_detailed', + fake.fake_get_images_detailed) + stubs.Set(GlanceClient, 'get_image_meta', fake.fake_get_image_meta) + stubs.Set(GlanceClient, 'add_image', fake.fake_add_image) + stubs.Set(GlanceClient, 'update_image', fake.fake_update_image) + stubs.Set(GlanceClient, 'delete_image', fake.fake_delete_image) + #stubs.Set(GlanceClient, 'delete_all', fake.fake_delete_all) class FakeToken(object): diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py index 1b2e1654d..73120c31d 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -19,15 +19,19 @@ import unittest import stubout import 
webob +from paste import urlmap -import nova.api from nova import flags +from nova.api import openstack +from nova.api.openstack import ratelimiting +from nova.api.openstack import auth from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS class AdminAPITest(unittest.TestCase): + def setUp(self): self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} @@ -45,7 +49,7 @@ class AdminAPITest(unittest.TestCase): FLAGS.allow_admin_api = True # We should still be able to access public operations. req = webob.Request.blank('/v1.0/flavors') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are available. @@ -53,7 +57,7 @@ class AdminAPITest(unittest.TestCase): FLAGS.allow_admin_api = False # We should still be able to access public operations. req = webob.Request.blank('/v1.0/flavors') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are unavailable. diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index d8b202e21..db0fe1060 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -19,14 +19,18 @@ import unittest import webob.exc import webob.dec -import nova.api.openstack -from nova.api.openstack import API -from nova.api.openstack import faults from webob import Request +from nova.api import openstack +from nova.api.openstack import faults + class APITest(unittest.TestCase): + def _wsgi_app(self, inner_app): + # simpler version of the app than fakes.wsgi_app + return openstack.FaultWrapper(inner_app) + def test_exceptions_are_converted_to_faults(self): @webob.dec.wsgify @@ -46,29 +50,32 @@ class APITest(unittest.TestCase): exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc') return faults.Fault(exc) - api = API() - - api.application = succeed + #api.application = succeed + api = self._wsgi_app(succeed) resp = Request.blank('/').get_response(api) self.assertFalse('computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 200, resp.body) - api.application = raise_webob_exc + #api.application = raise_webob_exc + api = self._wsgi_app(raise_webob_exc) resp = Request.blank('/').get_response(api) self.assertFalse('computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 404, resp.body) - api.application = raise_api_fault + #api.application = raise_api_fault + api = self._wsgi_app(raise_api_fault) resp = Request.blank('/').get_response(api) self.assertTrue('itemNotFound' in resp.body, resp.body) self.assertEqual(resp.status_int, 404, resp.body) - api.application = fail + #api.application = fail + api = self._wsgi_app(fail) resp = Request.blank('/').get_response(api) self.assertTrue('{"computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 500, resp.body) - api.application = fail + #api.application = fail + api = self._wsgi_app(fail) resp = Request.blank('/.xml').get_response(api) self.assertTrue('<computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 500, resp.body) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 489a1dfbf..0dd65d321 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -53,7 +53,7 @@ class Test(unittest.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' 
req.headers['X-Auth-Key'] = 'derp' - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-CDN-Management-Url'], @@ -67,7 +67,7 @@ class Test(unittest.TestCase): req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' req.headers['X-Auth-Key'] = 'derp' - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-Server-Management-Url'], @@ -81,7 +81,7 @@ class Test(unittest.TestCase): fakes.FakeRouter) req = webob.Request.blank('/v1.0/fake') req.headers['X-Auth-Token'] = token - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') @@ -105,7 +105,7 @@ class Test(unittest.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-Token'] = 'bacon' - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '401 Unauthorized') self.assertEqual(self.destroy_called, True) @@ -113,18 +113,18 @@ class Test(unittest.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' req.headers['X-Auth-Key'] = 'derp' - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '401 Unauthorized') def test_no_user(self): req = webob.Request.blank('/v1.0/') - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '401 Unauthorized') def test_bad_token(self): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-Token'] = 'baconbaconbacon' - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '401 Unauthorized') @@ -149,7 +149,7 @@ class TestLimiter(unittest.TestCase): req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' req.headers['X-Auth-Key'] = 'derp' - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(len(result.headers['X-Auth-Token']), 40) token = result.headers['X-Auth-Token'] @@ -158,7 +158,7 @@ class TestLimiter(unittest.TestCase): req = webob.Request.blank('/v1.0/fake') req.method = 'POST' req.headers['X-Auth-Token'] = token - result = req.get_response(nova.api.API('os')) + result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '200 OK') self.assertEqual(result.headers['X-Test-Success'], 'True') diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 41018afdf..1bdaea161 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -39,7 +39,7 @@ class FlavorsTest(unittest.TestCase): def test_get_flavor_list(self): req = webob.Request.blank('/v1.0/flavors') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) def test_get_flavor_by_id(self): pass diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 0f274bd15..5d9ddefbe 100644 --- a/nova/tests/api/openstack/test_images.py +++ 
b/nova/tests/api/openstack/test_images.py @@ -22,7 +22,6 @@ and as a WSGI layer import json import datetime -import logging import unittest import stubout @@ -173,6 +172,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): IMAGE_FIXTURES = [ {'id': '23g2ogk23k4hhkk4k42l', + 'imageId': '23g2ogk23k4hhkk4k42l', 'name': 'public image #1', 'created_at': str(datetime.datetime.utcnow()), 'updated_at': str(datetime.datetime.utcnow()), @@ -182,6 +182,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): 'status': 'available', 'image_type': 'kernel'}, {'id': 'slkduhfas73kkaskgdas', + 'imageId': 'slkduhfas73kkaskgdas', 'name': 'public image #2', 'created_at': str(datetime.datetime.utcnow()), 'updated_at': str(datetime.datetime.utcnow()), @@ -209,7 +210,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): def test_get_image_index(self): req = webob.Request.blank('/v1.0/images') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) fixture_index = [dict(id=f['id'], name=f['name']) for f @@ -221,7 +222,7 @@ class ImageControllerWithGlanceServiceTest(unittest.TestCase): def test_get_image_details(self): req = webob.Request.blank('/v1.0/images/detail') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) def _is_equivalent_subset(x, y): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 6e611a55d..29883e7c8 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -100,14 +100,14 @@ class ServersTest(unittest.TestCase): def test_get_server_by_id(self): req = webob.Request.blank('/v1.0/servers/1') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], '1') self.assertEqual(res_dict['server']['name'], 'server1') def test_get_server_list(self): req = webob.Request.blank('/v1.0/servers') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) i = 0 @@ -133,6 +133,12 @@ class ServersTest(unittest.TestCase): def queue_get_for(context, *args): return 'network_topic' + def kernel_ramdisk_mapping(*args, **kwargs): + return (1, 1) + + def image_id_from_hash(*args, **kwargs): + return 2 + self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) @@ -142,6 +148,10 @@ class ServersTest(unittest.TestCase): self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for) self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip', fake_method) + self.stubs.Set(nova.api.openstack.servers.Controller, + "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping) + self.stubs.Set(nova.api.openstack.common, + "get_image_id_from_image_hash", image_id_from_hash) body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, @@ -150,14 +160,14 @@ class ServersTest(unittest.TestCase): req.method = 'POST' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) def test_update_no_body(self): req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) 
self.assertEqual(res.status_int, 422) def test_update_bad_params(self): @@ -176,7 +186,7 @@ class ServersTest(unittest.TestCase): req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' req.body = self.body - req.get_response(nova.api.API('os')) + req.get_response(fakes.wsgi_app()) def test_update_server(self): inst_dict = dict(name='server_test', adminPass='bacon') @@ -192,28 +202,28 @@ class ServersTest(unittest.TestCase): req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' req.body = self.body - req.get_response(nova.api.API('os')) + req.get_response(fakes.wsgi_app()) def test_create_backup_schedules(self): req = webob.Request.blank('/v1.0/servers/1/backup_schedules') req.method = 'POST' - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_delete_backup_schedules(self): req = webob.Request.blank('/v1.0/servers/1/backup_schedules') req.method = 'DELETE' - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_get_server_backup_schedules(self): req = webob.Request.blank('/v1.0/servers/1/backup_schedules') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_get_all_server_details(self): req = webob.Request.blank('/v1.0/servers/detail') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) i = 0 @@ -232,7 +242,7 @@ class ServersTest(unittest.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_unpause(self): @@ -244,7 +254,7 @@ class ServersTest(unittest.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_suspend(self): @@ -256,7 +266,7 @@ class ServersTest(unittest.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_resume(self): @@ -268,19 +278,19 @@ class ServersTest(unittest.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 202) def test_server_diagnostics(self): req = webob.Request.blank("/v1.0/servers/1/diagnostics") req.method = "GET" - res = req.get_response(nova.api.API("os")) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) def test_server_actions(self): req = webob.Request.blank("/v1.0/servers/1/actions") req.method = "GET" - res = req.get_response(nova.api.API("os")) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) def test_server_reboot(self): @@ -291,7 +301,7 @@ class ServersTest(unittest.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) def test_server_rebuild(self): body = dict(server=dict( @@ -301,7 +311,7 @@ class 
ServersTest(unittest.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) def test_server_resize(self): body = dict(server=dict( @@ -311,7 +321,7 @@ class ServersTest(unittest.TestCase): req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) def test_delete_server_instance(self): req = webob.Request.blank('/v1.0/servers/1') @@ -325,7 +335,7 @@ class ServersTest(unittest.TestCase): self.stubs.Set(nova.db.api, 'instance_destroy', instance_destroy_mock) - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) diff --git a/nova/tests/api/openstack/test_sharedipgroups.py b/nova/tests/api/openstack/test_shared_ip_groups.py index d199951d8..c2fc3a203 100644 --- a/nova/tests/api/openstack/test_sharedipgroups.py +++ b/nova/tests/api/openstack/test_shared_ip_groups.py @@ -19,7 +19,7 @@ import unittest import stubout -from nova.api.openstack import sharedipgroups +from nova.api.openstack import shared_ip_groups class SharedIpGroupsTest(unittest.TestCase): diff --git a/nova/tests/api/test.py b/nova/tests/api/test.py deleted file mode 100644 index 9caa8c9d0..000000000 --- a/nova/tests/api/test.py +++ /dev/null @@ -1,81 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test for the root WSGI middleware for all API controllers. 
-""" - -import unittest - -import stubout -import webob -import webob.dec - -import nova.exception -from nova import api -from nova.tests.api.fakes import APIStub - - -class Test(unittest.TestCase): - - def setUp(self): - self.stubs = stubout.StubOutForTesting() - - def tearDown(self): - self.stubs.UnsetAll() - - def _request(self, url, subdomain, **kwargs): - environ_keys = {'HTTP_HOST': '%s.example.com' % subdomain} - environ_keys.update(kwargs) - req = webob.Request.blank(url, environ_keys) - return req.get_response(api.API('ec2')) - - def test_openstack(self): - self.stubs.Set(api.openstack, 'API', APIStub) - result = self._request('/v1.0/cloud', 'api') - self.assertEqual(result.body, "/cloud") - - def test_ec2(self): - self.stubs.Set(api.ec2, 'API', APIStub) - result = self._request('/services/cloud', 'ec2') - self.assertEqual(result.body, "/cloud") - - def test_not_found(self): - self.stubs.Set(api.ec2, 'API', APIStub) - self.stubs.Set(api.openstack, 'API', APIStub) - result = self._request('/test/cloud', 'ec2') - self.assertNotEqual(result.body, "/cloud") - - def test_query_api_versions(self): - result = self._request('/', 'api') - self.assertTrue('CURRENT' in result.body) - - def test_metadata(self): - def go(url): - result = self._request(url, 'ec2', REMOTE_ADDR='128.192.151.2') - # Each should get to the ORM layer and fail to find the IP - self.assertRaises(nova.exception.NotFound, go, '/latest/') - self.assertRaises(nova.exception.NotFound, go, '/2009-04-04/') - self.assertRaises(nova.exception.NotFound, go, '/1.0/') - - def test_ec2_root(self): - result = self._request('/', 'ec2') - self.assertTrue('2007-12-15\n' in result.body) - - -if __name__ == '__main__': - unittest.main() diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 7376a11dd..1097488ec 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -40,3 +40,4 @@ FLAGS.blades_per_shelf = 4 FLAGS.iscsi_num_targets = 8 FLAGS.verbose = True FLAGS.sql_connection = 'sqlite:///nova.sqlite' +FLAGS.use_ipv6 = True diff --git a/nova/tests/api/fakes.py b/nova/tests/glance/__init__.py index 0aedcaff0..ef9fa05a7 100644 --- a/nova/tests/api/fakes.py +++ b/nova/tests/glance/__init__.py @@ -1,7 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. +# Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -15,12 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. -import webob.dec -from nova import wsgi - - -class APIStub(object): - """Class to verify request and mark it was called.""" - @webob.dec.wsgify - def __call__(self, req): - return req.path_info +""" +:mod:`glance` -- Stubs for Glance +================================= +""" diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py new file mode 100644 index 000000000..f182b857a --- /dev/null +++ b/nova/tests/glance/stubs.py @@ -0,0 +1,37 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import StringIO + +import glance.client + + +def stubout_glance_client(stubs, cls): + """Stubs out glance.client.Client""" + stubs.Set(glance.client, 'Client', + lambda *args, **kwargs: cls(*args, **kwargs)) + + +class FakeGlance(object): + def __init__(self, host, port=None, use_ssl=False): + pass + + def get_image(self, image): + meta = { + 'size': 0, + } + image_file = StringIO.StringIO('') + return meta, image_file diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py new file mode 100644 index 000000000..3980ae3cb --- /dev/null +++ b/nova/tests/hyperv_unittest.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 Cloud.com, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Hyper-V driver +""" + +import random + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova.auth import manager +from nova.virt import hyperv + +FLAGS = flags.FLAGS +FLAGS.connection_type = 'hyperv' + + +class HyperVTestCase(test.TestCase): + """Test cases for the Hyper-V driver""" + def setUp(self): + super(HyperVTestCase, self).setUp() + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext(self.user, self.project) + + def test_create_destroy(self): + """Create a VM and destroy it""" + instance = {'internal_id': random.randint(1, 1000000), + 'memory_mb': '1024', + 'mac_address': '02:12:34:46:56:67', + 'vcpus': 2, + 'project_id': 'fake', + 'instance_type': 'm1.small'} + instance_ref = db.instance_create(self.context, instance) + + conn = hyperv.get_connection(False) + conn._create_vm(instance_ref) # pylint: disable-msg=W0212 + found = [n for n in conn.list_instances() + if n == instance_ref['name']] + self.assertTrue(len(found) == 1) + info = conn.get_info(instance_ref['name']) + #Unfortunately since the vm is not running at this point, + #we cannot obtain memory information from get_info + self.assertEquals(info['num_cpu'], instance_ref['vcpus']) + + conn.destroy(instance_ref) + found = [n for n in conn.list_instances() + if n == instance_ref['name']] + self.assertTrue(len(found) == 0) + + def tearDown(self): + super(HyperVTestCase, self).tearDown() + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py index ceac17adb..da86e6e11 100644 --- a/nova/tests/objectstore_unittest.py +++ 
b/nova/tests/objectstore_unittest.py @@ -23,7 +23,6 @@ Unittets for S3 objectstore clone. import boto import glob import hashlib -import logging import os import shutil import tempfile @@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase): self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), images_path=os.path.join(OSS_TEMPDIR, 'images'), ca_path=os.path.join(os.path.dirname(__file__), 'CA')) - logging.getLogger().setLevel(logging.DEBUG) self.auth_manager = manager.AuthManager() self.auth_manager.create_user('user1') diff --git a/nova/tests/test_access.py b/nova/tests/test_access.py index 58fdea3b5..e170ccee6 100644 --- a/nova/tests/test_access.py +++ b/nova/tests/test_access.py @@ -17,25 +17,34 @@ # under the License. import unittest -import logging import webob from nova import context -from nova import exception from nova import flags from nova import test from nova.api import ec2 from nova.auth import manager - FLAGS = flags.FLAGS -class Context(object): +class FakeControllerClass(object): pass +class FakeApiRequest(object): + def __init__(self, action): + self.controller = FakeControllerClass() + self.action = action + + class AccessTestCase(test.TestCase): + def _env_for(self, ctxt, action): + env = {} + env['ec2.context'] = ctxt + env['ec2.request'] = FakeApiRequest(action) + return env + def setUp(self): super(AccessTestCase, self).setUp() um = manager.AuthManager() @@ -65,7 +74,7 @@ class AccessTestCase(test.TestCase): return [''] self.mw = ec2.Authorizer(noopWSGIApp) - self.mw.action_roles = {'str': { + self.mw.action_roles = {'FakeControllerClass': { '_allow_all': ['all'], '_allow_none': [], '_allow_project_manager': ['projectmanager'], @@ -85,9 +94,7 @@ class AccessTestCase(test.TestCase): def response_status(self, user, methodName): ctxt = context.RequestContext(user, self.project) - environ = {'ec2.context': ctxt, - 'ec2.controller': 'some string', - 'ec2.action': methodName} + environ = self._env_for(ctxt, methodName) req = webob.Request.blank('/', environ) resp = req.get_response(self.mw) return resp.status_int diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 33d4cb294..66a16b0cb 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -26,9 +26,8 @@ import StringIO import webob from nova import context -from nova import flags from nova import test -from nova import api +from nova.api import ec2 from nova.api.ec2 import cloud from nova.api.ec2 import apirequest from nova.auth import manager @@ -79,7 +78,7 @@ class FakeHttplibConnection(object): pass -class XmlConversionTestCase(test.TrialTestCase): +class XmlConversionTestCase(test.TestCase): """Unit test api xml conversion""" def test_number_conversion(self): conv = apirequest._try_convert @@ -96,16 +95,14 @@ class XmlConversionTestCase(test.TrialTestCase): self.assertEqual(conv('-0'), 0) -class ApiEc2TestCase(test.TrialTestCase): +class ApiEc2TestCase(test.TestCase): """Unit test for the cloud controller on an EC2 API""" def setUp(self): super(ApiEc2TestCase, self).setUp() - self.manager = manager.AuthManager() - self.host = '127.0.0.1' - - self.app = api.API('ec2') + self.app = ec2.Authenticate(ec2.Requestify(ec2.Executor(), + 'nova.api.ec2.cloud.CloudController')) def expect_http(self, host=None, is_secure=False): """Returns a new EC2 connection""" @@ -265,6 +262,72 @@ class ApiEc2TestCase(test.TrialTestCase): return + def test_authorize_revoke_security_group_cidr_v6(self): + """ + Test that we can add and remove CIDR based rules + to a security group for IPv6 + """ + 
self.expect_http() + self.mox.ReplayAll() + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + + # At the moment, you need both of these to actually be netadmin + self.manager.add_role('fake', 'netadmin') + project.add_role('fake', 'netadmin') + + security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") + for x in range(random.randint(4, 8))) + + group = self.ec2.create_security_group(security_group_name, + 'test group') + + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + + group.authorize('tcp', 80, 81, '::/0') + + self.expect_http() + self.mox.ReplayAll() + + rv = self.ec2.get_all_security_groups() + # I don't bother checkng that we actually find it here, + # because the create/delete unit test further up should + # be good enough for that. + for group in rv: + if group.name == security_group_name: + self.assertEquals(len(group.rules), 1) + self.assertEquals(int(group.rules[0].from_port), 80) + self.assertEquals(int(group.rules[0].to_port), 81) + self.assertEquals(len(group.rules[0].grants), 1) + self.assertEquals(str(group.rules[0].grants[0]), '::/0') + + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + + group.revoke('tcp', 80, 81, '::/0') + + self.expect_http() + self.mox.ReplayAll() + + self.ec2.delete_security_group(security_group_name) + + self.expect_http() + self.mox.ReplayAll() + group.connection = self.ec2 + + rv = self.ec2.get_all_security_groups() + + self.assertEqual(len(rv), 1) + self.assertEqual(rv[0].name, 'default') + + self.manager.delete_project(project) + self.manager.delete_user(user) + + return + def test_authorize_revoke_security_group_foreign_group(self): """ Test that we can grant and revoke another security group access diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 15d40bc53..35ffffb67 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -16,17 +16,18 @@ # License for the specific language governing permissions and limitations # under the License. 
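The test_auth changes below apply the same logging migration used throughout this branch: drop the stdlib logging import, take a named logger from nova's log wrapper, and pass the request context so log lines can be correlated per request. The recurring pattern (module name hypothetical; the real code also wraps messages in _() for i18n):

from nova import log as logging

LOG = logging.getLogger('nova.some.module')

def do_something(context):
    # nova's logger accepts context= for request correlation and adds an
    # AUDIT level for security-relevant events such as authentication.
    LOG.audit("Something security-relevant happened", context=context)
    LOG.debug("diagnostic detail", context=context)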
-import logging from M2Crypto import X509 import unittest from nova import crypto from nova import flags +from nova import log as logging from nova import test from nova.auth import manager from nova.api.ec2 import cloud FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.auth_unittest') class user_generator(object): @@ -211,12 +212,12 @@ class AuthManagerTestCase(object): # NOTE(vish): Setup runs genroot.sh if it hasn't been run cloud.CloudController().setup() _key, cert_str = crypto.generate_x509_cert(user.id, project.id) - logging.debug(cert_str) + LOG.debug(cert_str) full_chain = crypto.fetch_ca(project_id=project.id, chain=True) int_cert = crypto.fetch_ca(project_id=project.id, chain=False) cloud_cert = crypto.fetch_ca() - logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain) + LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain) signed_cert = X509.load_cert_string(cert_str) chain_cert = X509.load_cert_string(full_chain) int_cert = X509.load_cert_string(int_cert) @@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase): test.TestCase.__init__(self, *args, **kwargs) import nova.auth.fakeldap as fakeldap if FLAGS.flush_db: - logging.info("Flushing datastore") + LOG.info("Flushing datastore") r = fakeldap.Store.instance() r.flushdb() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 42344af1c..771b1fcc0 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -18,10 +18,10 @@ from base64 import b64decode import json -import logging from M2Crypto import BIO from M2Crypto import RSA import os +import shutil import tempfile import time @@ -31,6 +31,7 @@ from nova import context from nova import crypto from nova import db from nova import flags +from nova import log as logging from nova import rpc from nova import service from nova import test @@ -41,6 +42,7 @@ from nova.objectstore import image FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.cloud') # Temp dirs for working with image attributes through the cloud controller # (stole this from objectstore_unittest.py) @@ -49,6 +51,8 @@ IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images') os.makedirs(IMAGES_PATH) +# TODO(termie): these tests are rather fragile, they should at the lest be +# wiping database state after each run class CloudTestCase(test.TestCase): def setUp(self): super(CloudTestCase, self).setUp() @@ -56,7 +60,6 @@ class CloudTestCase(test.TestCase): images_path=IMAGES_PATH) self.conn = rpc.Connection.instance() - logging.getLogger().setLevel(logging.DEBUG) # set up our cloud self.cloud = cloud.CloudController() @@ -126,13 +129,62 @@ class CloudTestCase(test.TestCase): vol2 = db.volume_create(self.context, {}) result = self.cloud.describe_volumes(self.context) self.assertEqual(len(result['volumeSet']), 2) + volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x') result = self.cloud.describe_volumes(self.context, - volume_id=[vol2['id']]) + volume_id=[volume_id]) self.assertEqual(len(result['volumeSet']), 1) - self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id']) + self.assertEqual( + cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']), + vol2['id']) db.volume_destroy(self.context, vol1['id']) db.volume_destroy(self.context, vol2['id']) + def test_describe_availability_zones(self): + """Makes sure describe_availability_zones works and filters results.""" + service1 = db.service_create(self.context, {'host': 'host1_zones', + 'binary': "nova-compute", + 'topic': 'compute', + 'report_count': 0, + 'availability_zone': "zone1"}) + 
service2 = db.service_create(self.context, {'host': 'host2_zones', + 'binary': "nova-compute", + 'topic': 'compute', + 'report_count': 0, + 'availability_zone': "zone2"}) + result = self.cloud.describe_availability_zones(self.context) + self.assertEqual(len(result['availabilityZoneInfo']), 3) + db.service_destroy(self.context, service1['id']) + db.service_destroy(self.context, service2['id']) + + def test_describe_instances(self): + """Makes sure describe_instances works and filters results.""" + inst1 = db.instance_create(self.context, {'reservation_id': 'a', + 'host': 'host1'}) + inst2 = db.instance_create(self.context, {'reservation_id': 'a', + 'host': 'host2'}) + comp1 = db.service_create(self.context, {'host': 'host1', + 'availability_zone': 'zone1', + 'topic': "compute"}) + comp2 = db.service_create(self.context, {'host': 'host2', + 'availability_zone': 'zone2', + 'topic': "compute"}) + result = self.cloud.describe_instances(self.context) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 2) + instance_id = cloud.id_to_ec2_id(inst2['id']) + result = self.cloud.describe_instances(self.context, + instance_id=[instance_id]) + result = result['reservationSet'][0] + self.assertEqual(len(result['instancesSet']), 1) + self.assertEqual(result['instancesSet'][0]['instanceId'], + instance_id) + self.assertEqual(result['instancesSet'][0] + ['placement']['availabilityZone'], 'zone2') + db.instance_destroy(self.context, inst1['id']) + db.instance_destroy(self.context, inst2['id']) + db.service_destroy(self.context, comp1['id']) + db.service_destroy(self.context, comp2['id']) + def test_console_output(self): image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type @@ -140,11 +192,24 @@ class CloudTestCase(test.TestCase): kwargs = {'image_id': image_id, 'instance_type': instance_type, 'max_count': max_count} + rv = self.cloud.run_instances(self.context, **kwargs) + instance_id = rv['instancesSet'][0]['instanceId'] + output = self.cloud.get_console_output(context=self.context, + instance_id=[instance_id]) + self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') + # TODO(soren): We need this until we can stop polling in the rpc code + # for unit tests. + greenthread.sleep(0.3) + rv = self.cloud.terminate_instances(self.context, [instance_id]) + + def test_ajax_console(self): + kwargs = {'image_id': image_id} rv = yield self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] output = yield self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) - self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') + self.assertEquals(b64decode(output['output']), + 'http://fakeajaxconsole.com/?token=FAKETOKEN') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. 
greenthread.sleep(0.3) @@ -178,7 +243,7 @@ class CloudTestCase(test.TestCase): def test_run_instances(self): if FLAGS.connection_type == 'fake': - logging.debug("Can't test instances without a real virtual env.") + LOG.debug(_("Can't test instances without a real virtual env.")) return image_id = FLAGS.default_image instance_type = FLAGS.default_instance_type @@ -186,32 +251,46 @@ class CloudTestCase(test.TestCase): kwargs = {'image_id': image_id, 'instance_type': instance_type, 'max_count': max_count} - rv = yield self.cloud.run_instances(self.context, **kwargs) + rv = self.cloud.run_instances(self.context, **kwargs) # TODO: check for proper response instance_id = rv['reservationSet'][0].keys()[0] instance = rv['reservationSet'][0][instance_id][0] - logging.debug("Need to watch instance %s until it's running..." % - instance['instance_id']) + LOG.debug(_("Need to watch instance %s until it's running..."), + instance['instance_id']) while True: greenthread.sleep(1) info = self.cloud._get_instance(instance['instance_id']) - logging.debug(info['state']) + LOG.debug(info['state']) if info['state'] == power_state.RUNNING: break self.assert_(rv) - if connection_type != 'fake': + if FLAGS.connection_type != 'fake': time.sleep(45) # Should use boto for polling here for reservations in rv['reservationSet']: # for res_id in reservations.keys(): - # logging.debug(reservations[res_id]) + # LOG.debug(reservations[res_id]) # for instance in reservations[res_id]: for instance in reservations[reservations.keys()[0]]: instance_id = instance['instance_id'] - logging.debug("Terminating instance %s" % instance_id) - rv = yield self.compute.terminate_instance(instance_id) + LOG.debug(_("Terminating instance %s"), instance_id) + rv = self.compute.terminate_instance(instance_id) + + def test_describe_instances(self): + """Makes sure describe_instances works.""" + instance1 = db.instance_create(self.context, {'host': 'host2'}) + comp1 = db.service_create(self.context, {'host': 'host2', + 'availability_zone': 'zone1', + 'topic': "compute"}) + result = self.cloud.describe_instances(self.context) + self.assertEqual(result['reservationSet'][0] + ['instancesSet'][0] + ['placement']['availabilityZone'], 'zone1') + db.instance_destroy(self.context, instance1['id']) + db.service_destroy(self.context, comp1['id']) def test_instance_update_state(self): + # TODO(termie): what is this code even testing? 
def instance(num): return { 'reservation_id': 'r-1', @@ -230,7 +309,8 @@ class CloudTestCase(test.TestCase): 'state': 0x01, 'user_data': ''} rv = self.cloud._format_describe_instances(self.context) - self.assert_(len(rv['reservationSet']) == 0) + logging.error(str(rv)) + self.assertEqual(len(rv['reservationSet']), 0) # simulate launch of 5 instances # self.cloud.instances['pending'] = {} @@ -293,6 +373,7 @@ class CloudTestCase(test.TestCase): self.assertEqual('Foo Img', img.metadata['description']) self._fake_set_image_description(self.context, 'ami-testing', '') self.assertEqual('', img.metadata['description']) + shutil.rmtree(pathdir) def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) @@ -313,7 +394,8 @@ class CloudTestCase(test.TestCase): def test_update_of_volume_display_fields(self): vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, vol['id'], + self.cloud.update_volume(self.context, + cloud.id_to_ec2_id(vol['id'], 'vol-%08x'), display_name='c00l v0lum3') vol = db.volume_get(self.context, vol['id']) self.assertEqual('c00l v0lum3', vol['display_name']) @@ -321,8 +403,9 @@ class CloudTestCase(test.TestCase): def test_update_of_volume_wont_update_private_fields(self): vol = db.volume_create(self.context, {}) - self.cloud.update_volume(self.context, vol['id'], - mountpoint='/not/here') + self.cloud.update_volume(self.context, + cloud.id_to_ec2_id(vol['id'], 'vol-%08x'), + mountpoint='/not/here') vol = db.volume_get(self.context, vol['id']) self.assertEqual(None, vol['mountpoint']) db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 1d527b8f0..09f6ee94a 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -20,25 +20,26 @@ Tests For Compute """ import datetime -import logging from nova import compute from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import test from nova import utils from nova.auth import manager +LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS +flags.DECLARE('stub_network', 'nova.compute.manager') class ComputeTestCase(test.TestCase): """Test case for compute""" def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(ComputeTestCase, self).setUp() self.flags(connection_type='fake', stub_network=True, @@ -75,7 +76,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create(self.context, FLAGS.default_instance_type, None, **instance) try: - self.assertNotEqual(ref[0].display_name, None) + self.assertNotEqual(ref[0]['display_name'], None) finally: db.instance_destroy(self.context, ref[0]['id']) @@ -86,10 +87,14 @@ class ComputeTestCase(test.TestCase): 'user_id': self.user.id, 'project_id': self.project.id} group = db.security_group_create(self.context, values) - ref = self.compute_api.create(self.context, - FLAGS.default_instance_type, None, security_group=['default']) + ref = self.compute_api.create( + self.context, + instance_type=FLAGS.default_instance_type, + image_id=None, + security_group=['default']) try: - self.assertEqual(len(ref[0]['security_groups']), 1) + self.assertEqual(len(db.security_group_get_by_instance( + self.context, ref[0]['id'])), 1) finally: db.security_group_destroy(self.context, group['id']) db.instance_destroy(self.context, ref[0]['id']) @@ -101,13 +106,13 @@ class ComputeTestCase(test.TestCase): self.compute.run_instance(self.context, instance_id) instances 
= db.instance_get_all(context.get_admin_context()) - logging.info(_("Running instances: %s"), instances) + LOG.info(_("Running instances: %s"), instances) self.assertEqual(len(instances), 1) self.compute.terminate_instance(self.context, instance_id) instances = db.instance_get_all(context.get_admin_context()) - logging.info(_("After terminating instances: %s"), instances) + LOG.info(_("After terminating instances: %s"), instances) self.assertEqual(len(instances), 0) def test_run_terminate_timestamps(self): @@ -151,6 +156,13 @@ class ComputeTestCase(test.TestCase): self.compute.reboot_instance(self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) + def test_set_admin_password(self): + """Ensure instance can have its admin password set""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.set_admin_password(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + def test_snapshot(self): """Ensure instance can be snapshotted""" instance_id = self._create_instance() @@ -169,6 +181,16 @@ class ComputeTestCase(test.TestCase): self.assert_(console) self.compute.terminate_instance(self.context, instance_id) + def test_ajax_console(self): + """Make sure we can get console output from instance""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + + console = self.compute.get_ajax_console(self.context, + instance_id) + self.assert_(console) + self.compute.terminate_instance(self.context, instance_id) + def test_run_instance_existing(self): """Ensure failure when running an instance that already exists""" instance_id = self._create_instance() @@ -178,3 +200,22 @@ class ComputeTestCase(test.TestCase): self.context, instance_id) self.compute.terminate_instance(self.context, instance_id) + + def test_lock(self): + """ensure locked instance cannot be changed""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + + non_admin_context = context.RequestContext(None, None, False, False) + + # decorator should return False (fail) with locked nonadmin context + self.compute.lock_instance(self.context, instance_id) + ret_val = self.compute.reboot_instance(non_admin_context, instance_id) + self.assertEqual(ret_val, False) + + # decorator should return None (success) with unlocked nonadmin context + self.compute.unlock_instance(self.context, instance_id) + ret_val = self.compute.reboot_instance(non_admin_context, instance_id) + self.assertEqual(ret_val, None) + + self.compute.terminate_instance(self.context, instance_id) diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py new file mode 100644 index 000000000..85bf94458 --- /dev/null +++ b/nova/tests/test_console.py @@ -0,0 +1,132 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Tests For Console proxy. +""" + +import datetime +import logging + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import test +from nova import utils +from nova.auth import manager +from nova.console import manager as console_manager + +FLAGS = flags.FLAGS + + +class ConsoleTestCase(test.TestCase): + """Test case for console proxy""" + def setUp(self): + logging.getLogger().setLevel(logging.DEBUG) + super(ConsoleTestCase, self).setUp() + self.flags(console_driver='nova.console.fake.FakeConsoleProxy', + stub_compute=True) + self.console = utils.import_object(FLAGS.console_manager) + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake') + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.get_admin_context() + self.host = 'test_compute_host' + + def tearDown(self): + self.manager.delete_user(self.user) + self.manager.delete_project(self.project) + super(ConsoleTestCase, self).tearDown() + + def _create_instance(self): + """Create a test instance""" + inst = {} + #inst['host'] = self.host + #inst['name'] = 'instance-1234' + inst['image_id'] = 'ami-test' + inst['reservation_id'] = 'r-fakeres' + inst['launch_time'] = '10' + inst['user_id'] = self.user.id + inst['project_id'] = self.project.id + inst['instance_type'] = 'm1.tiny' + inst['mac_address'] = utils.generate_mac() + inst['ami_launch_index'] = 0 + return db.instance_create(self.context, inst)['id'] + + def test_get_pool_for_instance_host(self): + pool = self.console.get_pool_for_instance_host(self.context, self.host) + self.assertEqual(pool['compute_host'], self.host) + + def test_get_pool_creates_new_pool_if_needed(self): + self.assertRaises(exception.NotFound, + db.console_pool_get_by_host_type, + self.context, + self.host, + self.console.host, + self.console.driver.console_type) + pool = self.console.get_pool_for_instance_host(self.context, + self.host) + pool2 = db.console_pool_get_by_host_type(self.context, + self.host, + self.console.host, + self.console.driver.console_type) + self.assertEqual(pool['id'], pool2['id']) + + def test_get_pool_does_not_create_new_pool_if_exists(self): + pool_info = {'address': '127.0.0.1', + 'username': 'test', + 'password': '1234pass', + 'host': self.console.host, + 'console_type': self.console.driver.console_type, + 'compute_host': 'sometesthostname'} + new_pool = db.console_pool_create(self.context, pool_info) + pool = self.console.get_pool_for_instance_host(self.context, + 'sometesthostname') + self.assertEqual(pool['id'], new_pool['id']) + + def test_add_console(self): + instance_id = self._create_instance() + self.console.add_console(self.context, instance_id) + instance = db.instance_get(self.context, instance_id) + pool = db.console_pool_get_by_host_type(self.context, + instance['host'], + self.console.host, + self.console.driver.console_type) + + console_instances = [con['instance_id'] for con in pool.consoles] + self.assert_(instance_id in console_instances) + db.instance_destroy(self.context, instance_id) + + def test_add_console_does_not_duplicate(self): + instance_id = self._create_instance() + cons1 = self.console.add_console(self.context, instance_id) + cons2 = self.console.add_console(self.context, instance_id) + self.assertEqual(cons1, cons2) + db.instance_destroy(self.context, instance_id) + + def test_remove_console(self): + 
instance_id = self._create_instance() + console_id = self.console.add_console(self.context, instance_id) + self.console.remove_console(self.context, console_id) + + self.assertRaises(exception.NotFound, + db.console_get, + self.context, + console_id) + db.instance_destroy(self.context, instance_id) diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py new file mode 100644 index 000000000..8a74b2296 --- /dev/null +++ b/nova/tests/test_direct.py @@ -0,0 +1,103 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Tests for Direct API.""" + +import json +import logging + +import webob + +from nova import compute +from nova import context +from nova import exception +from nova import test +from nova import utils +from nova.api import direct +from nova.tests import test_cloud + + +class FakeService(object): + def echo(self, context, data): + return {'data': data} + + def context(self, context): + return {'user': context.user_id, + 'project': context.project_id} + + +class DirectTestCase(test.TestCase): + def setUp(self): + super(DirectTestCase, self).setUp() + direct.register_service('fake', FakeService()) + self.router = direct.PostParamsMiddleware( + direct.JsonParamsMiddleware( + direct.Router())) + self.auth_router = direct.DelegatedAuthMiddleware(self.router) + self.context = context.RequestContext('user1', 'proj1') + + def tearDown(self): + direct.ROUTES = {} + + def test_delegated_auth(self): + req = webob.Request.blank('/fake/context') + req.headers['X-OpenStack-User'] = 'user1' + req.headers['X-OpenStack-Project'] = 'proj1' + resp = req.get_response(self.auth_router) + data = json.loads(resp.body) + self.assertEqual(data['user'], 'user1') + self.assertEqual(data['project'], 'proj1') + + def test_json_params(self): + req = webob.Request.blank('/fake/echo') + req.environ['openstack.context'] = self.context + req.method = 'POST' + req.body = 'json=%s' % json.dumps({'data': 'foo'}) + resp = req.get_response(self.router) + resp_parsed = json.loads(resp.body) + self.assertEqual(resp_parsed['data'], 'foo') + + def test_post_params(self): + req = webob.Request.blank('/fake/echo') + req.environ['openstack.context'] = self.context + req.method = 'POST' + req.body = 'data=foo' + resp = req.get_response(self.router) + resp_parsed = json.loads(resp.body) + self.assertEqual(resp_parsed['data'], 'foo') + + def test_proxy(self): + proxy = direct.Proxy(self.router) + rv = proxy.fake.echo(self.context, data='baz') + self.assertEqual(rv['data'], 'baz') + + +class DirectCloudTestCase(test_cloud.CloudTestCase): + def setUp(self): + super(DirectCloudTestCase, self).setUp() + compute_handle = compute.API(image_service=self.cloud.image_service, + network_api=self.cloud.network_api, + volume_api=self.cloud.volume_api) + direct.register_service('compute', compute_handle) + self.router = 
direct.JsonParamsMiddleware(direct.Router()) + proxy = direct.Proxy(self.router) + self.cloud.compute_api = proxy.compute + + def tearDown(self): + super(DirectCloudTestCase, self).tearDown() + direct.ROUTES = {} diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py new file mode 100644 index 000000000..868a5ead3 --- /dev/null +++ b/nova/tests/test_log.py @@ -0,0 +1,110 @@ +import cStringIO + +from nova import context +from nova import log +from nova import test + + +def _fake_context(): + return context.RequestContext(1, 1) + + +class RootLoggerTestCase(test.TestCase): + def setUp(self): + super(RootLoggerTestCase, self).setUp() + self.log = log.logging.root + + def tearDown(self): + super(RootLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_is_nova_instance(self): + self.assert_(isinstance(self.log, log.NovaLogger)) + + def test_name_is_nova_root(self): + self.assertEqual("nova.root", self.log.name) + + def test_handlers_have_nova_formatter(self): + formatters = [] + for h in self.log.handlers: + f = h.formatter + if isinstance(f, log.NovaFormatter): + formatters.append(f) + self.assert_(formatters) + self.assertEqual(len(formatters), len(self.log.handlers)) + + def test_handles_context_kwarg(self): + self.log.info("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def test_module_level_methods_handle_context_arg(self): + log.info("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + def test_module_level_audit_handles_context_arg(self): + log.audit("foo", context=_fake_context()) + self.assert_(True) # didn't raise exception + + +class NovaFormatterTestCase(test.TestCase): + def setUp(self): + super(NovaFormatterTestCase, self).setUp() + self.flags(logging_context_format_string="HAS CONTEXT "\ + "[%(request_id)s]: %(message)s", + logging_default_format_string="NOCTXT: %(message)s", + logging_debug_format_suffix="--DBG") + self.log = log.logging.root + self.stream = cStringIO.StringIO() + handler = log.StreamHandler(self.stream) + self.log.addHandler(handler) + self.log.setLevel(log.DEBUG) + + def tearDown(self): + super(NovaFormatterTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_uncontextualized_log(self): + self.log.info("foo") + self.assertEqual("NOCTXT: foo\n", self.stream.getvalue()) + + def test_contextualized_log(self): + ctxt = _fake_context() + self.log.info("bar", context=ctxt) + expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id + self.assertEqual(expected, self.stream.getvalue()) + + def test_debugging_log(self): + self.log.debug("baz") + self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue()) + + +class NovaLoggerTestCase(test.TestCase): + def setUp(self): + super(NovaLoggerTestCase, self).setUp() + self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False) + self.log = log.getLogger('nova-test') + + def tearDown(self): + super(NovaLoggerTestCase, self).tearDown() + log.NovaLogger.manager.loggerDict = {} + + def test_has_level_from_flags(self): + self.assertEqual(log.AUDIT, self.log.level) + + def test_child_log_has_level_of_parent_flag(self): + l = log.getLogger('nova-test.foo') + self.assertEqual(log.AUDIT, l.level) + + +class VerboseLoggerTestCase(test.TestCase): + def setUp(self): + super(VerboseLoggerTestCase, self).setUp() + self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True) + self.log = log.getLogger('nova.test') + + def tearDown(self): + super(VerboseLoggerTestCase, self).tearDown() + 
log.NovaLogger.manager.loggerDict = {}
+
+    def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
+        self.assertEqual(log.DEBUG, self.log.level)
diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py
index 0febf52d6..9d49167ba 100644
--- a/nova/tests/test_middleware.py
+++ b/nova/tests/test_middleware.py
@@ -38,7 +38,7 @@ def conditional_forbid(req):
     return 'OK'


-class LockoutTestCase(test.TrialTestCase):
+class LockoutTestCase(test.TestCase):
     """Test case for the Lockout middleware."""
     def setUp(self):  # pylint: disable-msg=C0103
         super(LockoutTestCase, self).setUp()
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 96473ac7c..00f9323f3 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,18 +20,18 @@ Unit Tests for network code
 """
 import IPy
 import os
-import logging

 from nova import context
 from nova import db
 from nova import exception
 from nova import flags
-from nova import service
+from nova import log as logging
 from nova import test
 from nova import utils
 from nova.auth import manager

 FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')


 class NetworkTestCase(test.TestCase):
@@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase):
                    fake_network=True,
                    network_size=16,
                    num_networks=5)
-        logging.getLogger().setLevel(logging.DEBUG)
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
         self.projects = []
@@ -97,6 +96,28 @@ class NetworkTestCase(test.TestCase):
             self.context.project_id = self.projects[project_num].id
             self.network.deallocate_fixed_ip(self.context, address)

+    def test_private_ipv6(self):
+        """Make sure ipv6 is OK"""
+        if FLAGS.use_ipv6:
+            instance_ref = self._create_instance(0)
+            address = self._create_address(0, instance_ref['id'])
+            network_ref = db.project_get_network(
+                                                 context.get_admin_context(),
+                                                 self.context.project_id)
+            address_v6 = db.instance_get_fixed_address_v6(
+                                                 context.get_admin_context(),
+                                                 instance_ref['id'])
+            self.assertEqual(instance_ref['mac_address'],
+                             utils.to_mac(address_v6))
+            instance_ref2 = db.fixed_ip_get_instance_v6(
+                                                 context.get_admin_context(),
+                                                 address_v6)
+            self.assertEqual(instance_ref['id'], instance_ref2['id'])
+            self.assertEqual(address_v6,
+                             utils.to_global_ipv6(
+                                                 network_ref['cidr_v6'],
+                                                 instance_ref['mac_address']))
+
     def test_public_network_association(self):
         """Makes sure that we can allocate a public ip"""
         # TODO(vish): better way of adding floating ips
@@ -328,7 +349,7 @@ def lease_ip(private_ip):
            'TESTING': '1',
            'FLAGFILE': FLAGS.dhcpbridge_flagfile}
     (out, err) = utils.execute(cmd, addl_env=env)
-    logging.debug("ISSUE_IP: %s, %s ", out, err)
+    LOG.debug("ISSUE_IP: %s, %s ", out, err)


 def release_ip(private_ip):
@@ -344,4 +365,4 @@
            'TESTING': '1',
            'FLAGFILE': FLAGS.dhcpbridge_flagfile}
     (out, err) = utils.execute(cmd, addl_env=env)
-    logging.debug("RELEASE_IP: %s, %s ", out, err)
+    LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index b5f9f30ef..9548a8c13 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,11 +16,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.
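test_private_ipv6 above depends on the new utils.to_global_ipv6() and utils.to_mac() being inverses of each other: the instance's v6 address is the network prefix merged with an EUI-64 expansion of its MAC. A round-trip sketch, with made-up MAC and prefix values, assuming the netaddr behaviour the utils code relies on:

    from nova import utils

    mac = '02:16:3e:33:44:55'                     # hypothetical instance MAC
    addr = utils.to_global_ipv6('fd00::/64', mac)
    print addr                                    # fd00::16:3eff:fe33:4455
    assert utils.to_mac(addr) == mac              # the EUI-64 round trip holds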
-import logging - from nova import context from nova import db -from nova import exception from nova import flags from nova import quota from nova import test @@ -35,7 +32,6 @@ FLAGS = flags.FLAGS class QuotaTestCase(test.TestCase): def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(QuotaTestCase, self).setUp() self.flags(connection_type='fake', quota_instances=2, diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 6ea2edcab..85593ab46 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -18,15 +18,16 @@ """ Unit Tests for remote procedure calls using queue """ -import logging from nova import context from nova import flags +from nova import log as logging from nova import rpc from nova import test FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.rpc') class RpcTestCase(test.TestCase): @@ -85,12 +86,12 @@ class RpcTestCase(test.TestCase): @staticmethod def echo(context, queue, value): """Calls echo in the passed queue""" - logging.debug("Nested received %s, %s", queue, value) + LOG.debug(_("Nested received %s, %s"), queue, value) ret = rpc.call(context, queue, {"method": "echo", "args": {"value": value}}) - logging.debug("Nested return %s", ret) + LOG.debug(_("Nested return %s"), ret) return value nested = Nested() @@ -115,13 +116,13 @@ class TestReceiver(object): @staticmethod def echo(context, value): """Simply returns whatever value is sent in""" - logging.debug("Received %s", value) + LOG.debug(_("Received %s"), value) return value @staticmethod def context(context, value): """Returns dictionary version of context""" - logging.debug("Received %s", context) + LOG.debug(_("Received %s"), context) return context.to_dict() @staticmethod diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index a9937d797..9d458244b 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -21,6 +21,7 @@ Tests For Scheduler import datetime +from mox import IgnoreArg from nova import context from nova import db from nova import flags @@ -76,6 +77,59 @@ class SchedulerTestCase(test.TestCase): scheduler.named_method(ctxt, 'topic', num=7) +class ZoneSchedulerTestCase(test.TestCase): + """Test case for zone scheduler""" + def setUp(self): + super(ZoneSchedulerTestCase, self).setUp() + self.flags(scheduler_driver='nova.scheduler.zone.ZoneScheduler') + + def _create_service_model(self, **kwargs): + service = db.sqlalchemy.models.Service() + service.host = kwargs['host'] + service.disabled = False + service.deleted = False + service.report_count = 0 + service.binary = 'nova-compute' + service.topic = 'compute' + service.id = kwargs['id'] + service.availability_zone = kwargs['zone'] + service.created_at = datetime.datetime.utcnow() + return service + + def test_with_two_zones(self): + scheduler = manager.SchedulerManager() + ctxt = context.get_admin_context() + service_list = [self._create_service_model(id=1, + host='host1', + zone='zone1'), + self._create_service_model(id=2, + host='host2', + zone='zone2'), + self._create_service_model(id=3, + host='host3', + zone='zone2'), + self._create_service_model(id=4, + host='host4', + zone='zone2'), + self._create_service_model(id=5, + host='host5', + zone='zone2')] + self.mox.StubOutWithMock(db, 'service_get_all_by_topic') + arg = IgnoreArg() + db.service_get_all_by_topic(arg, arg).AndReturn(service_list) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + rpc.cast(ctxt, + 'compute.host1', + {'method': 'run_instance', + 'args': {'instance_id': 'i-ffffffff', + 
'availability_zone': 'zone1'}}) + self.mox.ReplayAll() + scheduler.run_instance(ctxt, + 'compute', + instance_id='i-ffffffff', + availability_zone='zone1') + + class SimpleDriverTestCase(test.TestCase): """Test case for simple driver""" def setUp(self): diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 9f1a181a0..a67c8d1e8 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -133,7 +133,8 @@ class ServiceTestCase(test.TestCase): service_create = {'host': host, 'binary': binary, 'topic': topic, - 'report_count': 0} + 'report_count': 0, + 'availability_zone': 'nova'} service_ref = {'host': host, 'binary': binary, 'report_count': 0, @@ -161,11 +162,13 @@ class ServiceTestCase(test.TestCase): service_create = {'host': host, 'binary': binary, 'topic': topic, - 'report_count': 0} + 'report_count': 0, + 'availability_zone': 'nova'} service_ref = {'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, + 'availability_zone': 'nova', 'id': 1} service.db.service_get_by_args(mox.IgnoreArg(), @@ -193,11 +196,13 @@ class ServiceTestCase(test.TestCase): service_create = {'host': host, 'binary': binary, 'topic': topic, - 'report_count': 0} + 'report_count': 0, + 'availability_zone': 'nova'} service_ref = {'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, + 'availability_zone': 'nova', 'id': 1} service.db.service_get_by_args(mox.IgnoreArg(), @@ -224,11 +229,13 @@ class ServiceTestCase(test.TestCase): service_create = {'host': host, 'binary': binary, 'topic': topic, - 'report_count': 0} + 'report_count': 0, + 'availability_zone': 'nova'} service_ref = {'host': host, 'binary': binary, 'topic': topic, 'report_count': 0, + 'availability_zone': 'nova', 'id': 1} service.db.service_get_by_args(mox.IgnoreArg(), diff --git a/nova/tests/test_twistd.py b/nova/tests/test_twistd.py index 75007b9c8..ff8627c3b 100644 --- a/nova/tests/test_twistd.py +++ b/nova/tests/test_twistd.py @@ -28,7 +28,7 @@ from nova import test FLAGS = flags.FLAGS -class TwistdTestCase(test.TrialTestCase): +class TwistdTestCase(test.TestCase): def setUp(self): super(TwistdTestCase, self).setUp() self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 4aa489d08..556fe561c 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -122,10 +122,10 @@ class LibvirtConnTestCase(test.TestCase): if rescue: check = (lambda t: t.find('./os/kernel').text.split('/')[1], - 'rescue-kernel') + 'kernel.rescue') check_list.append(check) check = (lambda t: t.find('./os/initrd').text.split('/')[1], - 'rescue-ramdisk') + 'ramdisk.rescue') check_list.append(check) else: if expect_kernel: @@ -161,13 +161,16 @@ class LibvirtConnTestCase(test.TestCase): if rescue: common_checks += [ (lambda t: t.findall('./devices/disk/source')[0].get( - 'file').split('/')[1], 'rescue-disk'), + 'file').split('/')[1], 'disk.rescue'), (lambda t: t.findall('./devices/disk/source')[1].get( 'file').split('/')[1], 'disk')] else: common_checks += [(lambda t: t.findall( './devices/disk/source')[0].get('file').split('/')[1], 'disk')] + common_checks += [(lambda t: t.findall( + './devices/disk/source')[1].get('file').split('/')[1], + 'disk.local')] for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type @@ -208,8 +211,141 @@ class LibvirtConnTestCase(test.TestCase): self.manager.delete_user(self.user) -class NWFilterTestCase(test.TestCase): +class 
IptablesFirewallTestCase(test.TestCase): + def setUp(self): + super(IptablesFirewallTestCase, self).setUp() + + self.manager = manager.AuthManager() + self.user = self.manager.create_user('fake', 'fake', 'fake', + admin=True) + self.project = self.manager.create_project('fake', 'fake', 'fake') + self.context = context.RequestContext('fake', 'fake') + self.network = utils.import_object(FLAGS.network_manager) + self.fw = libvirt_conn.IptablesFirewallDriver() + + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(IptablesFirewallTestCase, self).tearDown() + + def _p(self, *args, **kwargs): + if 'iptables-restore' in args: + print ' '.join(args), kwargs['stdin'] + if 'iptables-save' in args: + return + + in_rules = [ + '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010', + '*filter', + ':INPUT ACCEPT [969615:281627771]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [915599:63811649]', + ':nova-block-ipv4 - [0:0]', + '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED' + ',ESTABLISHED -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ', + 'COMMIT', + '# Completed on Mon Dec 6 11:54:13 2010', + ] + + def test_static_filters(self): + self.fw.execute = self._p + instance_ref = db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake'}) + ip = '10.11.12.13' + + network_ref = db.project_get_network(self.context, + 'fake') + fixed_ip = {'address': ip, + 'network_id': network_ref['id']} + + admin_ctxt = context.get_admin_context() + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + + secgroup = db.security_group_create(admin_ctxt, + {'user_id': 'fake', + 'project_id': 'fake', + 'name': 'testgroup', + 'description': 'test group'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'icmp', + 'from_port': -1, + 'to_port': -1, + 'cidr': '192.168.11.0/24'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'icmp', + 'from_port': 8, + 'to_port': -1, + 'cidr': '192.168.11.0/24'}) + + db.security_group_rule_create(admin_ctxt, + {'parent_group_id': secgroup['id'], + 'protocol': 'tcp', + 'from_port': 80, + 'to_port': 81, + 'cidr': '192.168.10.0/24'}) + + db.instance_add_security_group(admin_ctxt, instance_ref['id'], + secgroup['id']) + instance_ref = db.instance_get(admin_ctxt, instance_ref['id']) + + self.fw.add_instance(instance_ref) + + out_rules = self.fw.modify_rules(self.in_rules) + + in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) + for rule in in_rules: + if not 'nova' in rule: + self.assertTrue(rule in out_rules, + 'Rule went missing: %s' % rule) + + instance_chain = None + for rule in out_rules: + # This is pretty crude, but it'll do for now + if '-d 10.11.12.13 -j' in rule: + instance_chain = rule.split(' ')[-1] + break + self.assertTrue(instance_chain, "The instance chain wasn't added") + + security_group_chain = None + for rule in out_rules: + # This is 
pretty crude, but it'll do for now + if '-A %s -j' % instance_chain in rule: + security_group_chain = rule.split(' ')[-1] + break + self.assertTrue(security_group_chain, + "The security group chain wasn't added") + + self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \ + security_group_chain in out_rules, + "ICMP acceptance rule wasn't added") + + self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type' + ' 8 -j ACCEPT' % security_group_chain in out_rules, + "ICMP Echo Request acceptance rule wasn't added") + + self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport ' + '--dports 80:81 -j ACCEPT' % security_group_chain \ + in out_rules, + "TCP port 80/81 acceptance rule wasn't added") + + +class NWFilterTestCase(test.TestCase): def setUp(self): super(NWFilterTestCase, self).setUp() @@ -224,7 +360,8 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection = Mock() - self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection) + self.fw = libvirt_conn.NWFilterFirewall( + lambda: self.fake_libvirt_connection) def tearDown(self): self.manager.delete_project(self.project) @@ -337,7 +474,7 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - self.fw.setup_base_nwfilters() - self.fw.setup_nwfilters_for_instance(instance) + self.fw.setup_basic_filtering(instance) + self.fw.prepare_instance_filter(instance) _ensure_all_called() self.teardown_security_group() diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index b13455fb0..b40ca004b 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -19,23 +19,23 @@ Tests for Volume Code. """ -import logging from nova import context from nova import exception from nova import db from nova import flags +from nova import log as logging from nova import test from nova import utils FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.volume') class VolumeTestCase(test.TestCase): """Test Case for volumes.""" def setUp(self): - logging.getLogger().setLevel(logging.DEBUG) super(VolumeTestCase, self).setUp() self.compute = utils.import_object(FLAGS.compute_manager) self.flags(connection_type='fake') @@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase): volume_id) self.assert_(iscsi_target not in targets) targets.append(iscsi_target) - logging.debug("Target %s allocated", iscsi_target) + LOG.debug(_("Target %s allocated"), iscsi_target) total_slots = FLAGS.iscsi_num_targets for _index in xrange(total_slots): volume_id = self._create_volume() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index ec9462ada..9f5b266f3 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -31,8 +31,10 @@ from nova.compute import power_state from nova.virt import xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils +from nova.virt.xenapi.vmops import SimpleDH from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs +from nova.tests.glance import stubs as glance_stubs FLAGS = flags.FLAGS @@ -107,18 +109,16 @@ class XenAPIVolumeTestCase(test.TestCase): conn = xenapi_conn.get_connection(False) volume = self._create_volume() instance = db.instance_create(self.values) - xenapi_fake.create_vm(instance.name, 'Running') + vm = xenapi_fake.create_vm(instance.name, 'Running') result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc') def check(): # check that the VM has a VBD attached to it - # Get XenAPI reference for 
the VM - vms = xenapi_fake.get_all('VM') # Get XenAPI record for VBD vbds = xenapi_fake.get_all('VBD') vbd = xenapi_fake.get_record('VBD', vbds[0]) vm_ref = vbd['VM'] - self.assertEqual(vm_ref, vms[0]) + self.assertEqual(vm_ref, vm) check() @@ -156,9 +156,14 @@ class XenAPIVMTestCase(test.TestCase): FLAGS.xenapi_connection_url = 'test_url' FLAGS.xenapi_connection_password = 'test_pass' xenapi_fake.reset() + xenapi_fake.create_local_srs() db_fakes.stub_out_db_instance_api(self.stubs) xenapi_fake.create_network('fake', FLAGS.flat_network_bridge) stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + stubs.stubout_get_this_vm_uuid(self.stubs) + stubs.stubout_stream_disk(self.stubs) + glance_stubs.stubout_glance_client(self.stubs, + glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) def test_list_instances_0(self): @@ -206,40 +211,70 @@ class XenAPIVMTestCase(test.TestCase): check() - def test_spawn(self): - instance = self._create_instance() + def check_vm_record(self, conn): + instances = conn.list_instances() + self.assertEquals(instances, [1]) + + # Get Nova record for VM + vm_info = conn.get_info(1) + + # Get XenAPI record for VM + vms = [rec for ref, rec + in xenapi_fake.get_all_records('VM').iteritems() + if not rec['is_control_domain']] + vm = vms[0] + + # Check that m1.large above turned into the right thing. + instance_type = instance_types.INSTANCE_TYPES['m1.large'] + mem_kib = long(instance_type['memory_mb']) << 10 + mem_bytes = str(mem_kib << 10) + vcpus = instance_type['vcpus'] + self.assertEquals(vm_info['max_mem'], mem_kib) + self.assertEquals(vm_info['mem'], mem_kib) + self.assertEquals(vm['memory_static_max'], mem_bytes) + self.assertEquals(vm['memory_dynamic_max'], mem_bytes) + self.assertEquals(vm['memory_dynamic_min'], mem_bytes) + self.assertEquals(vm['VCPUs_max'], str(vcpus)) + self.assertEquals(vm['VCPUs_at_startup'], str(vcpus)) + + # Check that the VM is running according to Nova + self.assertEquals(vm_info['state'], power_state.RUNNING) + + # Check that the VM is running according to XenAPI. + self.assertEquals(vm['power_state'], 'Running') + + def _test_spawn(self, image_id, kernel_id, ramdisk_id): + stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) + values = {'name': 1, + 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': image_id, + 'kernel_id': kernel_id, + 'ramdisk_id': ramdisk_id, + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + conn = xenapi_conn.get_connection(False) + instance = db.instance_create(values) + conn.spawn(instance) + self.check_vm_record(conn) - def check(): - instances = self.conn.list_instances() - self.assertEquals(instances, [1]) - - # Get Nova record for VM - vm_info = self.conn.get_info(1) - - # Get XenAPI record for VM - vms = xenapi_fake.get_all('VM') - vm = xenapi_fake.get_record('VM', vms[0]) - - # Check that m1.large above turned into the right thing. 
- instance_type = instance_types.INSTANCE_TYPES['m1.large'] - mem_kib = long(instance_type['memory_mb']) << 10 - mem_bytes = str(mem_kib << 10) - vcpus = instance_type['vcpus'] - self.assertEquals(vm_info['max_mem'], mem_kib) - self.assertEquals(vm_info['mem'], mem_kib) - self.assertEquals(vm['memory_static_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_min'], mem_bytes) - self.assertEquals(vm['VCPUs_max'], str(vcpus)) - self.assertEquals(vm['VCPUs_at_startup'], str(vcpus)) - - # Check that the VM is running according to Nova - self.assertEquals(vm_info['state'], power_state.RUNNING) - - # Check that the VM is running according to XenAPI. - self.assertEquals(vm['power_state'], 'Running') + def test_spawn_raw_objectstore(self): + FLAGS.xenapi_image_service = 'objectstore' + self._test_spawn(1, None, None) - check() + def test_spawn_objectstore(self): + FLAGS.xenapi_image_service = 'objectstore' + self._test_spawn(1, 2, 3) + + def test_spawn_raw_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(1, None, None) + + def test_spawn_glance(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(1, 2, 3) def tearDown(self): super(XenAPIVMTestCase, self).tearDown() @@ -262,3 +297,29 @@ class XenAPIVMTestCase(test.TestCase): instance = db.instance_create(values) self.conn.spawn(instance) return instance + + +class XenAPIDiffieHellmanTestCase(test.TestCase): + """ + Unit tests for Diffie-Hellman code + """ + def setUp(self): + super(XenAPIDiffieHellmanTestCase, self).setUp() + self.alice = SimpleDH() + self.bob = SimpleDH() + + def test_shared(self): + alice_pub = self.alice.get_public() + bob_pub = self.bob.get_public() + alice_shared = self.alice.compute_shared(bob_pub) + bob_shared = self.bob.compute_shared(alice_pub) + self.assertEquals(alice_shared, bob_shared) + + def test_encryption(self): + msg = "This is a top-secret message" + enc = self.alice.encrypt(msg) + dec = self.bob.decrypt(enc) + self.assertEquals(dec, msg) + + def tearDown(self): + super(XenAPIDiffieHellmanTestCase, self).tearDown() diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 55f751f11..624995ada 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -41,9 +41,33 @@ def stubout_instance_snapshot(stubs): rv = done.wait() return rv + def fake_loop(self): + pass + stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', fake_wait_for_task) + stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop) + + from nova.virt.xenapi.fake import create_vdi + name_label = "instance-%s" % instance_id + #TODO: create fake SR record + sr_ref = "fakesr" + vdi_ref = create_vdi(name_label=name_label, read_only=False, + sr_ref=sr_ref, sharable=False) + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + vdi_uuid = vdi_rec['uuid'] + return vdi_uuid + + stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) + + def fake_parse_xmlrpc_value(val): + return val + + stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value) + + def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, + original_parent_uuid): from nova.virt.xenapi.fake import create_vdi name_label = "instance-%s" % instance_id #TODO: create fake SR record @@ -91,6 +115,21 @@ def stub_out_get_target(stubs): stubs.Set(volume_utils, '_get_target', fake_get_target) +def stubout_get_this_vm_uuid(stubs): + def f(): + vms = [rec['uuid'] for ref, rec + in fake.get_all_records('VM').iteritems() + if 
rec['is_control_domain']] + return vms[0] + stubs.Set(vm_utils, 'get_this_vm_uuid', f) + + +def stubout_stream_disk(stubs): + def f(_1, _2, _3, _4): + pass + stubs.Set(vm_utils, '_stream_disk', f) + + class FakeSessionForVMTests(fake.SessionBase): """ Stubs out a XenAPISession for VM tests """ def __init__(self, uri): @@ -100,7 +139,10 @@ class FakeSessionForVMTests(fake.SessionBase): return self.xenapi.network.get_all_records() def host_call_plugin(self, _1, _2, _3, _4, _5): - return '' + sr_ref = fake.get_all('SR')[0] + vdi_ref = fake.create_vdi('', False, sr_ref, False) + vdi_rec = fake.get_record('VDI', vdi_ref) + return '<string>%s</string>' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) @@ -135,10 +177,6 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, ref): - rec = fake.get_record('VBD', ref) - rec['currently-attached'] = True - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11): valid_vdi = False diff --git a/nova/twistd.py b/nova/twistd.py index 29be9c4e1..556271999 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -22,7 +22,6 @@ manage pid files and support syslogging. """ import gflags -import logging import os import signal import sys @@ -34,6 +33,7 @@ from twisted.python import runtime from twisted.python import usage from nova import flags +from nova import log as logging if runtime.platformType == "win32": @@ -234,22 +234,12 @@ def serve(filename): OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() argv = options.parseOptions() - logging.getLogger('amqplib').setLevel(logging.WARN) FLAGS.python = filename FLAGS.no_save = True if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name elif FLAGS.pidfile.endswith('twistd.pid'): FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - if FLAGS.nodaemon and not FLAGS.logfile: - FLAGS.logfile = "-" - if not FLAGS.logfile: - FLAGS.logfile = '%s.log' % name - elif FLAGS.logfile.endswith('twistd.log'): - FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) - if FLAGS.logdir: - FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) if not FLAGS.prefix: FLAGS.prefix = name elif FLAGS.prefix.endswith('twisted'): @@ -270,19 +260,10 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - formatter = logging.Formatter( - '(%(name)s): %(levelname)s %(message)s') - handler = logging.StreamHandler(log.StdioOnnaStick()) - handler.setFormatter(formatter) - logging.getLogger().addHandler(handler) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) - + logging.basicConfig() logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + logging.audit(_("Starting %s"), name) twistd.runApp(options) diff --git a/nova/utils.py b/nova/utils.py index 15112faa2..6d3ddd092 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -22,7 +22,7 @@ System-level utilities and helper functions. 
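The twistd.py hunk above is the template for the logging conversion that runs through this whole merge: stdlib logging goes away, each module binds a LOG from nova.log, interpolation arguments are passed to the call instead of being %-formatted up front, and operator-facing messages gain the new audit level. A sketch of the convention; the logger name is illustrative, and _ is the gettext builtin nova installs:

    from nova import log as logging

    LOG = logging.getLogger('nova.example')     # hypothetical module name

    def report(instances):
        # args interpolate lazily, only if the level is actually enabled
        LOG.debug(_("Running instances: %s"), instances)
        LOG.audit(_("Starting %s"), 'example')  # audit() is a nova.log addition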
import datetime import inspect -import logging +import json import os import random import subprocess @@ -31,14 +31,18 @@ import struct import sys import time from xml.sax import saxutils +import re +import netaddr from eventlet import event from eventlet import greenthread from nova import exception from nova.exception import ProcessExecutionError +from nova import log as logging +LOG = logging.getLogger("nova.utils") TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" @@ -109,7 +113,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): def fetchfile(url, target): - logging.debug(_("Fetching %s") % url) + LOG.debug(_("Fetching %s") % url) # c = pycurl.Curl() # fp = open(target, "wb") # c.setopt(c.URL, url) @@ -121,7 +125,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug(_("Running cmd (subprocess): %s"), cmd) + LOG.debug(_("Running cmd (subprocess): %s"), cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -134,7 +138,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - logging.debug(_("Result was %s") % (obj.returncode)) + LOG.debug(_("Result was %s") % (obj.returncode)) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -152,6 +156,11 @@ def abspath(s): return os.path.join(os.path.dirname(__file__), s) +def novadir(): + import nova + return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0] + + def default_flagfile(filename='nova.conf'): for arg in sys.argv: if arg.find('flagfile') != -1: @@ -167,12 +176,12 @@ def default_flagfile(filename='nova.conf'): def debug(arg): - logging.debug('debug in callback: %s', arg) + LOG.debug(_('debug in callback: %s'), arg) return arg def runthis(prompt, cmd, check_exit_code=True): - logging.debug(_("Running %s") % (cmd)) + LOG.debug(_("Running %s"), (cmd)) rv, err = execute(cmd, check_exit_code=check_exit_code) @@ -194,17 +203,38 @@ def last_octet(address): return int(address.split(".")[-1]) -def get_my_ip(): - """Returns the actual ip of the local machine.""" +def get_my_linklocal(interface): try: - csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - csock.connect(('8.8.8.8', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr - except socket.gaierror as ex: - logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) - return "127.0.0.1" + if_str = execute("ip -f inet6 -o addr show %s" % interface) + condition = "\s+inet6\s+([0-9a-f:]+/\d+)\s+scope\s+link" + links = [re.search(condition, x) for x in if_str[0].split('\n')] + address = [w.group(1) for w in links if w is not None] + if address[0] is not None: + return address[0] + else: + return 'fe00::' + except IndexError as ex: + LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex) + except ProcessExecutionError as ex: + LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex) + except: + return 'fe00::' + + +def to_global_ipv6(prefix, mac): + mac64 = netaddr.EUI(mac).eui64().words + int_addr = int(''.join(['%02x' % i for i in mac64]), 16) + mac64_addr = netaddr.IPAddress(int_addr) + maskIP = netaddr.IPNetwork(prefix).ip + return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).format() + + +def to_mac(ipv6_address): + address = netaddr.IPAddress(ipv6_address) + mask1 = netaddr.IPAddress("::ffff:ffff:ffff:ffff") + mask2 = netaddr.IPAddress("::0200:0:0:0") + mac64 = 
netaddr.EUI(int(address & mask1 ^ mask2)).words + return ":".join(["%02x" % i for i in mac64[0:3] + mac64[5:8]]) def utcnow(): @@ -296,7 +326,7 @@ class LazyPluggable(object): fromlist = backend self.__backend = __import__(name, None, None, fromlist) - logging.info('backend %s', self.__backend) + LOG.debug(_('backend %s'), self.__backend) return self.__backend def __getattr__(self, key): @@ -304,6 +334,20 @@ class LazyPluggable(object): return getattr(backend, key) +class LoopingCallDone(Exception): + """The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return""" + self.retvalue = retvalue + + class LoopingCall(object): def __init__(self, f=None, *args, **kw): self.args = args @@ -322,12 +366,15 @@ class LoopingCall(object): while self._running: self.f(*self.args, **self.kw) greenthread.sleep(interval) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) except Exception: logging.exception('in looping call') done.send_exception(*sys.exc_info()) return - - done.send(True) + else: + done.send(True) self.done = done @@ -362,3 +409,36 @@ def utf8(value): return value.encode("utf-8") assert isinstance(value, str) return value + + +def to_primitive(value): + if type(value) is type([]) or type(value) is type((None,)): + o = [] + for v in value: + o.append(to_primitive(v)) + return o + elif type(value) is type({}): + o = {} + for k, v in value.iteritems(): + o[k] = to_primitive(v) + return o + elif isinstance(value, datetime.datetime): + return str(value) + elif hasattr(value, 'iteritems'): + return to_primitive(dict(value.iteritems())) + elif hasattr(value, '__iter__'): + return to_primitive(list(value)) + else: + return value + + +def dumps(value): + try: + return json.dumps(value) + except TypeError: + pass + return json.dumps(to_primitive(value)) + + +def loads(s): + return json.loads(s) diff --git a/nova/version.py b/nova/version.py new file mode 100644 index 000000000..7b27acb6a --- /dev/null +++ b/nova/version.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
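LoopingCallDone, added to nova/utils.py above, gives a polled function a clean way out of LoopingCall: raise it, optionally with a value, and LoopingCall.wait() returns that value instead of propagating an exception. A minimal sketch:

    from nova import utils

    state = {'polls': 0}

    def _poll():
        state['polls'] += 1
        if state['polls'] >= 3:       # stand-in for a real completion check
            raise utils.LoopingCallDone(retvalue='finished')

    timer = utils.LoopingCall(_poll)
    timer.start(interval=0.1)
    print timer.wait()                # -> 'finished'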
+ +try: + from nova.vcsversion import version_info +except ImportError: + version_info = {'branch_nick': u'LOCALBRANCH', + 'revision_id': 'LOCALREVISION', + 'revno': 0} + +NOVA_VERSION = ['2011', '1'] +YEAR, COUNT = NOVA_VERSION + +FINAL = False # This becomes true at Release Candidate time + + +def canonical_version_string(): + return '.'.join([YEAR, COUNT]) + + +def version_string(): + if FINAL: + return canonical_version_string() + else: + return '%s-dev' % (canonical_version_string(),) + + +def vcs_version_string(): + return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) + + +def version_string_with_vcs(): + return "%s-%s" % (canonical_version_string(), vcs_version_string()) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 61e99944e..13181b730 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -19,15 +19,17 @@ """Abstraction of the underlying virtualization API.""" -import logging import sys from nova import flags +from nova import log as logging from nova.virt import fake from nova.virt import libvirt_conn from nova.virt import xenapi_conn +from nova.virt import hyperv +LOG = logging.getLogger("nova.virt.connection") FLAGS = flags.FLAGS @@ -62,10 +64,12 @@ def get_connection(read_only=False): conn = libvirt_conn.get_connection(read_only) elif t == 'xenapi': conn = xenapi_conn.get_connection(read_only) + elif t == 'hyperv': + conn = hyperv.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error(_('Failed to open connection to the hypervisor')) + LOG.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/disk.py b/nova/virt/disk.py new file mode 100644 index 000000000..c5565abfa --- /dev/null +++ b/nova/virt/disk.py @@ -0,0 +1,186 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Utility methods to resize, repartition, and modify disk images. + +Includes injection of SSH PGP keys into authorized_keys file. 
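For the nova/version.py module above, with no generated nova/vcsversion.py on the path (so the LOCALBRANCH fallback applies) and FINAL still False, the helpers come out as:

    from nova import version

    print version.canonical_version_string()   # 2011.1
    print version.version_string()             # 2011.1-dev
    print version.version_string_with_vcs()    # 2011.1-LOCALBRANCH:LOCALREVISION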
+ +""" + +import os +import tempfile +import time + +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils + + +LOG = logging.getLogger('nova.compute.disk') +FLAGS = flags.FLAGS +flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, + 'minimum size in bytes of root partition') +flags.DEFINE_integer('block_size', 1024 * 1024 * 256, + 'block_size to use for dd') + + +def extend(image, size): + """Increase image to size""" + file_size = os.path.getsize(image) + if file_size >= size: + return + utils.execute('truncate -s %s %s' % (size, image)) + # NOTE(vish): attempts to resize filesystem + utils.execute('e2fsck -fp %s' % image, check_exit_code=False) + utils.execute('resize2fs %s' % image, check_exit_code=False) + + +def inject_data(image, key=None, net=None, partition=None, nbd=False): + """Injects a ssh key and optionally net data into a disk image. + + it will mount the image as a fully partitioned disk and attempt to inject + into the specified partition number. + + If partition is not specified it mounts the image as a single partition. + + """ + device = _link_device(image, nbd) + try: + if not partition is None: + # create partition + out, err = utils.execute('sudo kpartx -a %s' % device) + if err: + raise exception.Error(_('Failed to load partition: %s') % err) + mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], + partition) + else: + mapped_device = device + + # We can only loopback mount raw images. If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots + out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) + + tmpdir = tempfile.mkdtemp() + try: + # mount loopback to dir + out, err = utils.execute( + 'sudo mount %s %s' % (mapped_device, tmpdir)) + if err: + raise exception.Error(_('Failed to mount filesystem: %s') + % err) + + try: + if key: + # inject key file + _inject_key_into_fs(key, tmpdir) + if net: + _inject_net_into_fs(net, tmpdir) + finally: + # unmount device + utils.execute('sudo umount %s' % mapped_device) + finally: + # remove temporary directory + utils.execute('rmdir %s' % tmpdir) + if not partition is None: + # remove partitions + utils.execute('sudo kpartx -d %s' % device) + finally: + _unlink_device(device, nbd) + + +def _link_device(image, nbd): + """Link image to device using loopback or nbd""" + if nbd: + device = _allocate_device() + utils.execute('sudo qemu-nbd -c %s %s' % (device, image)) + # NOTE(vish): this forks into another process, so give it a chance + # to set up before continuuing + for i in xrange(10): + if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)): + return device + time.sleep(1) + raise exception.Error(_('nbd device %s did not show up') % device) + else: + out, err = utils.execute('sudo losetup --find --show %s' % image) + if err: + raise exception.Error(_('Could not attach image to loopback: %s') + % err) + return out.strip() + + +def _unlink_device(device, nbd): + """Unlink image from device using loopback or nbd""" + if nbd: + utils.execute('sudo qemu-nbd -d %s' % device) + _free_device(device) + else: + utils.execute('sudo losetup --detach %s' % device) + + +_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)] + + +def _allocate_device(): + # NOTE(vish): This assumes no other processes 
are allocating nbd devices.
+    #             It may cause a race condition if multiple
+    #             workers are running on a given machine.
+    while True:
+        if not _DEVICES:
+            raise exception.Error(_('No free nbd devices'))
+        device = _DEVICES.pop()
+        if not os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
+            break
+    return device
+
+
+def _free_device(device):
+    _DEVICES.append(device)
+
+
+def _inject_key_into_fs(key, fs):
+    """Add the given public ssh key to root's authorized_keys.
+
+    key is an ssh key string.
+    fs is the path to the base of the filesystem into which to inject the key.
+    """
+    sshdir = os.path.join(fs, 'root', '.ssh')
+    utils.execute('sudo mkdir -p %s' % sshdir)  # existing dir doesn't matter
+    utils.execute('sudo chown root %s' % sshdir)
+    utils.execute('sudo chmod 700 %s' % sshdir)
+    keyfile = os.path.join(sshdir, 'authorized_keys')
+    utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+
+
+def _inject_net_into_fs(net, fs):
+    """Inject /etc/network/interfaces into the filesystem rooted at fs.
+
+    net is the contents of /etc/network/interfaces.
+    """
+    netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
+    utils.execute('sudo mkdir -p %s' % netdir)  # existing dir doesn't matter
+    utils.execute('sudo chown root:root %s' % netdir)
+    utils.execute('sudo chmod 755 %s' % netdir)
+    netfile = os.path.join(netdir, 'interfaces')
+    utils.execute('sudo tee %s' % netfile, net)
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 32541f5b4..f8b3c7807 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -98,7 +98,7 @@ class FakeConnection(object):
         the new instance.
 
         The work will be done asynchronously. This function returns a
-        Deferred that allows the caller to detect when it is complete.
+        task that allows the caller to detect when it is complete.
 
         Once this successfully completes, the instance should be
         running (power_state.RUNNING).
@@ -122,7 +122,7 @@ class FakeConnection(object):
         The second parameter is the name of the snapshot.
 
         The work will be done asynchronously. This function returns a
-        Deferred that allows the caller to detect when it is complete.
+        task that allows the caller to detect when it is complete.
         """
         pass
 
@@ -134,7 +134,20 @@ class FakeConnection(object):
         and so the instance is being specified as instance.name.
 
         The work will be done asynchronously. This function returns a
-        Deferred that allows the caller to detect when it is complete.
+        task that allows the caller to detect when it is complete.
+        """
+        pass
+
+    def set_admin_password(self, instance, new_pass):
+        """
+        Set the root password on the specified instance.
+
+        The first parameter is an instance of nova.compute.service.Instance,
+        and so the instance is being specified as instance.name. The second
+        parameter is the value of the new password.
+
+        The work will be done asynchronously. This function returns a
+        task that allows the caller to detect when it is complete.
         """
         pass
 
@@ -182,7 +195,7 @@ class FakeConnection(object):
         and so the instance is being specified as instance.name.
 
         The work will be done asynchronously. This function returns a
-        Deferred that allows the caller to detect when it is complete.
+        task that allows the caller to detect when it is complete.
""" del self.instances[instance.name] @@ -289,6 +302,62 @@ class FakeConnection(object): def get_console_output(self, instance): return 'FAKE CONSOLE OUTPUT' + def get_ajax_console(self, instance): + return 'http://fakeajaxconsole.com/?token=FAKETOKEN' + + def get_console_pool_info(self, console_type): + return {'address': '127.0.0.1', + 'username': 'fakeuser', + 'password': 'fakepassword'} + + def refresh_security_group_rules(self, security_group_id): + """This method is called after a change to security groups. + + All security groups and their associated rules live in the datastore, + and calling this method should apply the updated rules to instances + running the specified security group. + + An error should be raised if the operation cannot complete. + + """ + return True + + def refresh_security_group_members(self, security_group_id): + """This method is called when a security group is added to an instance. + + This message is sent to the virtualization drivers on hosts that are + running an instance that belongs to a security group that has a rule + that references the security group identified by `security_group_id`. + It is the responsiblity of this method to make sure any rules + that authorize traffic flow with members of the security group are + updated and any new members can communicate, and any removed members + cannot. + + Scenario: + * we are running on host 'H0' and we have an instance 'i-0'. + * instance 'i-0' is a member of security group 'speaks-b' + * group 'speaks-b' has an ingress rule that authorizes group 'b' + * another host 'H1' runs an instance 'i-1' + * instance 'i-1' is a member of security group 'b' + + When 'i-1' launches or terminates we will recieve the message + to update members of group 'b', at which time we will make + any changes needed to the rules for instance 'i-0' to allow + or deny traffic coming from 'i-1', depending on if it is being + added or removed from the group. + + In this scenario, 'i-1' could just as easily have been running on our + host 'H0' and this method would still have been called. The point was + that this method isn't called on the host where instances of that + group are running (as is the case with + :method:`refresh_security_group_rules`) but is called where references + are made to authorizing those instances. + + An error should be raised if the operation cannot complete. + + """ + return True + class FakeInstance(object): diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py new file mode 100644 index 000000000..30dc1c79b --- /dev/null +++ b/nova/virt/hyperv.py @@ -0,0 +1,462 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Cloud.com, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to Hyper-V . 
+Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V.
+Hyper-V WMI usage:
+    http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx
+The Hyper-V object model briefly:
+    The physical computer and its hosted virtual machines are each represented
+    by the Msvm_ComputerSystem class.
+
+    Each virtual machine is associated with a
+    Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more
+    Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting
+    there is a series of Msvm_ResourceAllocationSettingData (rasd) objects.
+    The rasd objects describe the settings for each device in a VM.
+    Together, the vs_gs_data, vmsettings and rasds describe the configuration
+    of the virtual machine.
+
+    Creating new resources such as disks and nics involves cloning a default
+    rasd object and appropriately modifying the clone and calling the
+    AddVirtualSystemResources WMI method.
+    Changing resources such as memory uses the ModifyVirtualSystemResources
+    WMI method.
+
+Using the Python WMI library:
+    Tutorial:
+        http://timgolden.me.uk/python/wmi/tutorial.html
+    Hyper-V WMI objects can be retrieved simply by using the class name
+    of the WMI object and optionally specifying a column to filter the
+    result set. More complex filters can be formed using WQL (sql-like)
+    queries.
+    The parameters and return tuples of WMI method calls can be gleaned by
+    examining the doc string. For example:
+    >>> vs_man_svc.ModifyVirtualSystemResources.__doc__
+    ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[])
+     => (Job, ReturnValue)
+    When passing setting data (ResourceSettingData) to the WMI method,
+    an XML representation of the data is passed in using GetText_(1).
+    Available methods on a service can be determined using method.keys():
+    >>> vs_man_svc.methods.keys()
+    vmsettings and rasds for a vm can be retrieved using the 'associators'
+    method with the appropriate return class.
+    Long-running WMI commands generally return a Job (an instance of
+    Msvm_ConcreteJob) whose state can be polled to determine when it finishes.
+
+"""
+
+import os
+import time
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.auth import manager
+from nova.compute import power_state
+from nova.virt import images
+
+wmi = None
+
+
+FLAGS = flags.FLAGS
+
+
+LOG = logging.getLogger('nova.virt.hyperv')
+
+
+HYPERV_POWER_STATE = {
+    3: power_state.SHUTDOWN,
+    2: power_state.RUNNING,
+    32768: power_state.PAUSED,
+}
+
+
+REQ_POWER_STATE = {
+    'Enabled': 2,
+    'Disabled': 3,
+    'Reboot': 10,
+    'Reset': 11,
+    'Paused': 32768,
+    'Suspended': 32769,
+}
+
+
+WMI_JOB_STATUS_STARTED = 4096
+WMI_JOB_STATE_RUNNING = 4
+WMI_JOB_STATE_COMPLETED = 7
+
+
+def get_connection(_):
+    global wmi
+    if wmi is None:
+        wmi = __import__('wmi')
+    return HyperVConnection()
+
+
+class HyperVConnection(object):
+    def __init__(self):
+        self._conn = wmi.WMI(moniker='//./root/virtualization')
+        self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
+
+    def init_host(self):
+        #FIXME(chiradeep): implement this
+        LOG.debug(_('In init host'))
+        pass
+
+    def list_instances(self):
+        """ Return the names of all the instances known to Hyper-V.
""" + vms = [v.ElementName \ + for v in self._conn.Msvm_ComputerSystem(['ElementName'])] + return vms + + def spawn(self, instance): + """ Create a new VM and start it.""" + vm = self._lookup(instance.name) + if vm is not None: + raise exception.Duplicate(_('Attempt to create duplicate vm %s') % + instance.name) + + user = manager.AuthManager().get_user(instance['user_id']) + project = manager.AuthManager().get_project(instance['project_id']) + #Fetch the file, assume it is a VHD file. + base_vhd_filename = os.path.join(FLAGS.instances_path, + instance.name) + vhdfile = "%s.vhd" % (base_vhd_filename) + images.fetch(instance['image_id'], vhdfile, user, project) + + try: + self._create_vm(instance) + + self._create_disk(instance['name'], vhdfile) + self._create_nic(instance['name'], instance['mac_address']) + + LOG.debug(_('Starting VM %s '), instance.name) + self._set_vm_state(instance['name'], 'Enabled') + LOG.info(_('Started VM %s '), instance.name) + except Exception as exn: + LOG.exception(_('spawn vm failed: %s'), exn) + self.destroy(instance) + + def _create_vm(self, instance): + """Create a VM but don't start it. """ + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + + vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() + vs_gs_data.ElementName = instance['name'] + (job, ret_val) = vs_man_svc.DefineVirtualSystem( + [], None, vs_gs_data.GetText_(1))[1:] + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + else: + success = (ret_val == 0) + + if not success: + raise Exception(_('Failed to create VM %s'), instance.name) + + LOG.debug(_('Created VM %s...'), instance.name) + vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] + + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + vmsetting = [s for s in vmsettings + if s.SettingType == 3][0] # avoid snapshots + memsetting = vmsetting.associators( + wmi_result_class='Msvm_MemorySettingData')[0] + #No Dynamic Memory, so reservation, limit and quantity are identical. + mem = long(str(instance['memory_mb'])) + memsetting.VirtualQuantity = mem + memsetting.Reservation = mem + memsetting.Limit = mem + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [memsetting.GetText_(1)]) + LOG.debug(_('Set memory for vm %s...'), instance.name) + procsetting = vmsetting.associators( + wmi_result_class='Msvm_ProcessorSettingData')[0] + vcpus = long(instance['vcpus']) + procsetting.VirtualQuantity = vcpus + procsetting.Reservation = vcpus + procsetting.Limit = vcpus + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [procsetting.GetText_(1)]) + LOG.debug(_('Set vcpus for vm %s...'), instance.name) + + def _create_disk(self, vm_name, vhdfile): + """Create a disk and attach it to the vm""" + LOG.debug(_('Creating disk for %s by attaching disk file %s'), + vm_name, vhdfile) + #Find the IDE controller for the vm. + vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) + vm = vms[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + ctrller = [r for r in rasds + if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\ + and r.Address == "0"] + #Find the default disk drive object for the vm and clone it. 
+        diskdflt = self._conn.query(
+            "SELECT * FROM Msvm_ResourceAllocationSettingData \
+            WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
+            AND InstanceID LIKE '%Default%'")[0]
+        diskdrive = self._clone_wmi_obj(
+            'Msvm_ResourceAllocationSettingData', diskdflt)
+        #Set the IDE ctrller as parent.
+        diskdrive.Parent = ctrller[0].path_()
+        diskdrive.Address = 0
+        #Add the cloned disk drive object to the vm.
+        new_resources = self._add_virt_resource(diskdrive, vm)
+        if new_resources is None:
+            raise Exception(_('Failed to add diskdrive to VM %s'),
+                            vm_name)
+        diskdrive_path = new_resources[0]
+        LOG.debug(_('New disk drive path is %s'), diskdrive_path)
+        #Find the default VHD disk object.
+        vhddefault = self._conn.query(
+            "SELECT * FROM Msvm_ResourceAllocationSettingData \
+            WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \
+            InstanceID LIKE '%Default%' ")[0]
+
+        #Clone the default and point it to the image file.
+        vhddisk = self._clone_wmi_obj(
+            'Msvm_ResourceAllocationSettingData', vhddefault)
+        #Set the new drive as the parent.
+        vhddisk.Parent = diskdrive_path
+        vhddisk.Connection = [vhdfile]
+
+        #Add the new vhd object as a virtual hard disk to the vm.
+        new_resources = self._add_virt_resource(vhddisk, vm)
+        if new_resources is None:
+            raise Exception(_('Failed to add vhd file to VM %s'),
+                            vm_name)
+        LOG.info(_('Created disk for %s'), vm_name)
+
+    def _create_nic(self, vm_name, mac):
+        """Create an (emulated) nic and attach it to the vm"""
+        LOG.debug(_('Creating nic for %s '), vm_name)
+        #Find the vswitch that is connected to the physical nic.
+        vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
+        extswitch = self._find_external_network()
+        vm = vms[0]
+        switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
+        #Find the default nic and clone it to create a new nic for the vm.
+        #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with
+        #Linux Integration Components installed.
+        emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData()
+        default_nic_data = [n for n in emulatednics_data
+                            if n.InstanceID.rfind('Default') > 0]
+        new_nic_data = self._clone_wmi_obj(
+            'Msvm_EmulatedEthernetPortSettingData',
+            default_nic_data[0])
+        #Create a port on the vswitch.
+        (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
+                                                          "", extswitch.path_())
+        if ret_val != 0:
+            LOG.error(_('Failed creating a port on the external vswitch'))
+            raise Exception(_('Failed creating port for %s'),
+                            vm_name)
+        LOG.debug(_("Created switch port %s on switch %s"),
+                  vm_name, extswitch.path_())
+        #Connect the new nic to the new port.
+        new_nic_data.Connection = [new_port]
+        new_nic_data.ElementName = vm_name + ' nic'
+        new_nic_data.Address = ''.join(mac.split(':'))
+        new_nic_data.StaticMacAddress = 'TRUE'
+        #Add the new nic to the vm.
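+        # (Editorial note) StaticMacAddress is the string 'TRUE' rather than
+        # a Python bool: the setting data is serialized to XML via
+        # GetText_(1) before being handed to WMI, so values travel as text.
+        # The call below registers the nic through the same
+        # _add_virt_resource path as the disk drive above.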
+ new_resources = self._add_virt_resource(new_nic_data, vm) + if new_resources is None: + raise Exception(_('Failed to add nic to VM %s'), + vm_name) + LOG.info(_("Created nic for %s "), vm_name) + + def _add_virt_resource(self, res_setting_data, target_vm): + """Add a new resource (disk/nic) to the VM""" + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job, new_resources, ret_val) = vs_man_svc.\ + AddVirtualSystemResources([res_setting_data.GetText_(1)], + target_vm.path_()) + success = True + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + else: + success = (ret_val == 0) + if success: + return new_resources + else: + return None + + #TODO: use the reactor to poll instead of sleep + def _check_job_status(self, jobpath): + """Poll WMI job state for completion""" + #Jobs have a path of the form: + #\\WIN-P5IG7367DAG\root\virtualization:Msvm_ConcreteJob.InstanceID= + #"8A496B9C-AF4D-4E98-BD3C-1128CD85320D" + inst_id = jobpath.split('=')[1].strip('"') + jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id) + if len(jobs) == 0: + return False + job = jobs[0] + while job.JobState == WMI_JOB_STATE_RUNNING: + time.sleep(0.1) + job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0] + if job.JobState != WMI_JOB_STATE_COMPLETED: + LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription) + return False + LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description, + job.ElapsedTime) + return True + + def _find_external_network(self): + """Find the vswitch that is connected to the physical nic. + Assumes only one physical nic on the host + """ + #If there are no physical nics connected to networks, return. + bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') + if len(bound) == 0: + return None + return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\ + .associators(wmi_result_class='Msvm_SwitchLANEndpoint')[0]\ + .associators(wmi_result_class='Msvm_SwitchPort')[0]\ + .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + + def _clone_wmi_obj(self, wmi_class, wmi_obj): + """Clone a WMI object""" + cl = self._conn.__getattr__(wmi_class) # get the class + newinst = cl.new() + #Copy the properties from the original. + for prop in wmi_obj._properties: + newinst.Properties_.Item(prop).Value =\ + wmi_obj.Properties_.Item(prop).Value + return newinst + + def reboot(self, instance): + """Reboot the specified instance.""" + vm = self._lookup(instance.name) + if vm is None: + raise exception.NotFound('instance not present %s' % instance.name) + self._set_vm_state(instance.name, 'Reboot') + + def destroy(self, instance): + """Destroy the VM. Also destroy the associated VHD disk files""" + LOG.debug(_("Got request to destroy vm %s"), instance.name) + vm = self._lookup(instance.name) + if vm is None: + return + vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + #Stop the VM first. + self._set_vm_state(instance.name, 'Disabled') + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + disks = [r for r in rasds \ + if r.ResourceSubType == 'Microsoft Virtual Hard Disk'] + diskfiles = [] + #Collect disk file information before destroying the VM. + for disk in disks: + diskfiles.extend([c for c in disk.Connection]) + #Nuke the VM. Does not destroy disks. 
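+        # (Editorial note) Like DefineVirtualSystem and
+        # AddVirtualSystemResources above, DestroyVirtualSystem below either
+        # completes synchronously (ret_val == 0) or returns a started job
+        # (WMI_JOB_STATUS_STARTED) that _check_job_status polls. Note that,
+        # unlike _set_vm_state, `success` is not initialized to False first,
+        # so an unexpected ret_val would raise NameError rather than the
+        # intended Exception.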
+ (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + elif ret_val == 0: + success = True + if not success: + raise Exception(_('Failed to destroy vm %s') % instance.name) + #Delete associated vhd disk files. + for disk in diskfiles: + vhdfile = self._cim_conn.CIM_DataFile(Name=disk) + for vf in vhdfile: + vf.Delete() + LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name) + + def get_info(self, instance_id): + """Get information about the VM""" + vm = self._lookup(instance_id) + if vm is None: + raise exception.NotFound('instance not present %s' % instance_id) + vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + settings_paths = [v.path_() for v in vmsettings] + #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + summary_info = vs_man_svc.GetSummaryInformation( + [4, 100, 103, 105], settings_paths)[1] + info = summary_info[0] + LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ + cpu_time=%s"), instance_id, + str(HYPERV_POWER_STATE[info.EnabledState]), + str(info.MemoryUsage), + str(info.NumberOfProcessors), + str(info.UpTime)) + + return {'state': HYPERV_POWER_STATE[info.EnabledState], + 'max_mem': info.MemoryUsage, + 'mem': info.MemoryUsage, + 'num_cpu': info.NumberOfProcessors, + 'cpu_time': info.UpTime} + + def _lookup(self, i): + vms = self._conn.Msvm_ComputerSystem(ElementName=i) + n = len(vms) + if n == 0: + return None + elif n > 1: + raise Exception(_('duplicate name found: %s') % i) + else: + return vms[0].ElementName + + def _set_vm_state(self, vm_name, req_state): + """Set the desired state of the VM""" + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) + if len(vms) == 0: + return False + (job, ret_val) = vms[0].RequestStateChange(REQ_POWER_STATE[req_state]) + success = False + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + elif ret_val == 0: + success = True + elif ret_val == 32775: + #Invalid state for current operation. Typically means it is + #already in the state requested + success = True + if success: + LOG.info(_("Successfully changed vm state of %s to %s"), vm_name, + req_state) + else: + LOG.error(_("Failed to change vm state of %s to %s"), vm_name, + req_state) + raise Exception(_("Failed to change vm state of %s to %s"), + vm_name, req_state) + + def attach_volume(self, instance_name, device_path, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise exception.NotFound('Cannot attach volume to missing %s vm' % + instance_name) + + def detach_volume(self, instance_name, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise exception.NotFound('Cannot detach volume from missing %s ' % + instance_name) diff --git a/nova/virt/images.py b/nova/virt/images.py index 1c9b2e093..ecf0e5efb 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -22,10 +22,14 @@ Handling of VM disk images. 
""" import os.path +import shutil +import sys import time +import urllib2 import urlparse from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.auth import signer @@ -36,6 +40,8 @@ FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, 'whether to get images from s3 or use local copy') +LOG = logging.getLogger('nova.virt.images') + def fetch(image, path, user, project): if FLAGS.use_s3: @@ -45,6 +51,25 @@ def fetch(image, path, user, project): return f(image, path, user, project) +def _fetch_image_no_curl(url, path, headers): + request = urllib2.Request(url) + for (k, v) in headers.iteritems(): + request.add_header(k, v) + + def urlretrieve(urlfile, fpath): + chunk = 1 * 1024 * 1024 + f = open(fpath, "wb") + while 1: + data = urlfile.read(chunk) + if not data: + break + f.write(data) + + urlopened = urllib2.urlopen(request) + urlretrieve(urlopened, path) + LOG.debug(_("Finished retreving %s -- placed in %s"), url, path) + + def _fetch_s3_image(image, path, user, project): url = image_url(image) @@ -61,18 +86,24 @@ def _fetch_s3_image(image, path, user, project): url_path) headers['Authorization'] = 'AWS %s:%s' % (access, signature) - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k, v) in headers.iteritems(): - cmd += ['-H', '"%s: %s"' % (k, v)] + if sys.platform.startswith('win'): + return _fetch_image_no_curl(url, path, headers) + else: + cmd = ['/usr/bin/curl', '--fail', '--silent', url] + for (k, v) in headers.iteritems(): + cmd += ['-H', '\'%s: %s\'' % (k, v)] - cmd += ['-o', path] - cmd_out = ' '.join(cmd) - return utils.execute(cmd_out) + cmd += ['-o', path] + cmd_out = ' '.join(cmd) + return utils.execute(cmd_out) def _fetch_local_image(image, path, user, project): - source = _image_path('%s/image' % image) - return utils.execute('cp %s %s' % (source, path)) + source = _image_path(os.path.join(image, 'image')) + if sys.platform.startswith('win'): + return shutil.copy(source, path) + else: + return utils.execute('cp %s %s' % (source, path)) def _image_path(path): diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index 3fb2243da..8139c3620 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -7,28 +7,28 @@ #set $disk_bus = 'uml' <type>uml</type> <kernel>/usr/bin/linux</kernel> - <root>/dev/ubda1</root> + <root>/dev/ubda</root> #else #if $type == 'xen' #set $disk_prefix = 'sd' #set $disk_bus = 'scsi' <type>linux</type> - <root>/dev/xvda1</root> + <root>/dev/xvda</root> #else #set $disk_prefix = 'vd' #set $disk_bus = 'virtio' <type>hvm</type> - #end if + #end if #if $getVar('rescue', False) - <kernel>${basepath}/rescue-kernel</kernel> - <initrd>${basepath}/rescue-ramdisk</initrd> + <kernel>${basepath}/kernel.rescue</kernel> + <initrd>${basepath}/ramdisk.rescue</initrd> #else #if $getVar('kernel', None) <kernel>${kernel}</kernel> #if $type == 'xen' <cmdline>ro</cmdline> #else - <cmdline>root=/dev/vda1 console=ttyS0</cmdline> + <cmdline>root=/dev/vda console=ttyS0</cmdline> #end if #if $getVar('ramdisk', None) <initrd>${ramdisk}</initrd> @@ -46,18 +46,28 @@ <devices> #if $getVar('rescue', False) <disk type='file'> - <source file='${basepath}/rescue-disk'/> + <driver type='${driver_type}'/> + <source file='${basepath}/disk.rescue'/> <target dev='${disk_prefix}a' bus='${disk_bus}'/> </disk> <disk type='file'> + <driver type='${driver_type}'/> <source file='${basepath}/disk'/> <target dev='${disk_prefix}b' bus='${disk_bus}'/> </disk> #else <disk type='file'> + 
<driver type='${driver_type}'/> <source file='${basepath}/disk'/> <target dev='${disk_prefix}a' bus='${disk_bus}'/> </disk> + #if $getVar('local', False) + <disk type='file'> + <driver type='${driver_type}'/> + <source file='${basepath}/disk.local'/> + <target dev='${disk_prefix}b' bus='${disk_bus}'/> + </disk> + #end if #end if <interface type='bridge'> <source bridge='${bridge_name}'/> @@ -66,14 +76,28 @@ <filterref filter="nova-instance-${name}"> <parameter name="IP" value="${ip_address}" /> <parameter name="DHCPSERVER" value="${dhcp_server}" /> + <parameter name="RASERVER" value="${ra_server}" /> #if $getVar('extra_params', False) ${extra_params} #end if </filterref> </interface> + + <!-- The order is significant here. File must be defined first --> <serial type="file"> <source path='${basepath}/console.log'/> <target port='1'/> </serial> + + <console type='pty' tty='/dev/pts/2'> + <source path='/dev/pts/2'/> + <target port='0'/> + </console> + + <serial type='pty'> + <source path='/dev/pts/2'/> + <target port='0'/> + </serial> + </devices> </domain> diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 00edfbdc8..f5b0bd365 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,9 +36,13 @@ Supports KVM, QEMU, UML, and XEN. """ -import logging import os import shutil +import random +import subprocess +import uuid +from xml.dom import minidom + from eventlet import greenthread from eventlet import event @@ -50,18 +54,20 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils #from nova.api import context from nova.auth import manager -from nova.compute import disk from nova.compute import instance_types from nova.compute import power_state +from nova.virt import disk from nova.virt import images libvirt = None libxml2 = None Template = None +LOG = logging.getLogger('nova.virt.libvirt_conn') FLAGS = flags.FLAGS # TODO(vish): These flags should probably go into a shared location @@ -85,6 +91,15 @@ flags.DEFINE_string('libvirt_uri', flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') +flags.DEFINE_bool('use_cow_images', + True, + 'Whether to use cow images') +flags.DEFINE_string('ajaxterm_portrange', + '10000-12000', + 'Range of ports that ajaxterm should randomly try to bind') +flags.DEFINE_string('firewall_driver', + 'nova.virt.libvirt_conn.IptablesFirewallDriver', + 'Firewall driver (defaults to iptables)') def get_connection(read_only): @@ -115,6 +130,16 @@ def _get_net_and_mask(cidr): return str(net.net()), str(net.netmask()) +def _get_net_and_prefixlen(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.prefixlen()) + + +def _get_ip_version(cidr): + net = IPy.IP(cidr) + return int(net.version()) + + class LibvirtConnection(object): def __init__(self, read_only): @@ -124,16 +149,24 @@ class LibvirtConnection(object): self._wrapped_conn = None self.read_only = read_only + self.nwfilter = NWFilterFirewall(self._get_connection) + + if not FLAGS.firewall_driver: + self.firewall_driver = self.nwfilter + self.nwfilter.handle_security_groups = True + else: + self.firewall_driver = utils.import_object(FLAGS.firewall_driver) + def init_host(self): - NWFilterFirewall(self._conn).setup_base_nwfilters() + pass - @property - def _conn(self): + def _get_connection(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) + 
LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn + _conn = property(_get_connection) def _test_connection(self): try: @@ -142,7 +175,7 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug(_('Connection to libvirt broke')) + LOG.debug(_('Connection to libvirt broke')) return False raise @@ -177,45 +210,34 @@ class LibvirtConnection(object): pass # If the instance is already terminated, we're still happy - done = event.Event() - # We'll save this for when we do shutdown, # instead of destroy - but destroy returns immediately timer = utils.LoopingCall(f=None) - def _wait_for_shutdown(): + while True: try: state = self.get_info(instance['name'])['state'] db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.SHUTDOWN: - timer.stop() + break except Exception: db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) - timer.stop() + break - timer.f = _wait_for_shutdown - timer_done = timer.start(interval=0.5, now=True) + self.firewall_driver.unfilter_instance(instance) - # NOTE(termie): this is strictly superfluous (we could put the - # cleanup code in the timer), but this emulates the - # previous model so I am keeping it around until - # everything has been vetted a bit - def _wait_for_timer(): - timer_done.wait() - if cleanup: - self._cleanup(instance) - done.send() + if cleanup: + self._cleanup(instance) - greenthread.spawn(_wait_for_timer) - return done + return True def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info(_('instance %s: deleting instance files %s'), - instance['name'], target) + LOG.info(_('instance %s: deleting instance files %s'), + instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -223,11 +245,24 @@ class LibvirtConnection(object): def attach_volume(self, instance_name, device_path, mountpoint): virt_dom = self._conn.lookupByName(instance_name) mount_device = mountpoint.rpartition("/")[2] - xml = """<disk type='block'> - <driver name='qemu' type='raw'/> - <source dev='%s'/> - <target dev='%s' bus='virtio'/> - </disk>""" % (device_path, mount_device) + if device_path.startswith('/dev/'): + xml = """<disk type='block'> + <driver name='qemu' type='raw'/> + <source dev='%s'/> + <target dev='%s' bus='virtio'/> + </disk>""" % (device_path, mount_device) + elif ':' in device_path: + (protocol, name) = device_path.split(':') + xml = """<disk type='network'> + <driver name='qemu' type='raw'/> + <source protocol='%s' name='%s'/> + <target dev='%s' bus='virtio'/> + </disk>""" % (protocol, + name, + mount_device) + else: + raise exception.Invalid(_("Invalid device path %s") % device_path) + virt_dom.attachDevice(xml) def _get_disk_xml(self, xml, device): @@ -260,7 +295,7 @@ class LibvirtConnection(object): virt_dom.detachDevice(xml) @exception.wrap_exception - def snapshot(self, instance, name): + def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ raise NotImplementedError( _("Instance snapshotting is not supported for libvirt" @@ -279,10 +314,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rebooted'), instance['name']) 
+ LOG.debug(_('instance %s: rebooted'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_reboot failed: %s'), exn) + LOG.exception(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -315,7 +350,7 @@ class LibvirtConnection(object): rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, 'rescue-', rescue_images) + self._create_image(instance, xml, '.rescue', rescue_images) self._conn.createXML(xml, 0) timer = utils.LoopingCall(f=None) @@ -325,10 +360,10 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rescued'), instance['name']) + LOG.debug(_('instance %s: rescued'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_rescue failed: %s'), exn) + LOG.exception(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -350,10 +385,12 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) + self.nwfilter.setup_basic_filtering(instance) + self.firewall_driver.prepare_instance_filter(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug(_("instance %s: is running"), instance['name']) + LOG.debug(_("instance %s: is running"), instance['name']) + self.firewall_driver.apply_instance_filter(instance) timer = utils.LoopingCall(f=None) @@ -363,11 +400,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: booted'), instance['name']) + LOG.debug(_('instance %s: booted'), instance['name']) timer.stop() except: - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -377,11 +414,11 @@ class LibvirtConnection(object): return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): - logging.info('virsh said: %r' % (virsh_output,)) + LOG.info(_('virsh said: %r'), virsh_output) virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info(_('cool, it\'s a device')) + LOG.info(_('cool, it\'s a device')) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) return out @@ -389,7 +426,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info(_('data: %r, fpath: %r') % (data, fpath)) + LOG.info(_('data: %r, fpath: %r'), data, fpath) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -397,7 +434,7 @@ class LibvirtConnection(object): def _dump_file(self, fpath): fp = open(fpath, 'r+') contents = fp.read() - logging.info('Contents: %r' % (contents,)) + LOG.info(_('Contents of file %s: %r'), fpath, contents) return contents @exception.wrap_exception @@ -418,25 +455,100 @@ class LibvirtConnection(object): return self._dump_file(fpath) - def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None): + @exception.wrap_exception + def get_ajax_console(self, instance): + def get_open_port(): + 
start_port, end_port = FLAGS.ajaxterm_portrange.split("-") + for i in xrange(0, 100): # don't loop forever + port = random.randint(int(start_port), int(end_port)) + # netcat will exit with 0 only if the port is in use, + # so a nonzero return value implies it is unused + cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port) + stdout, stderr = utils.execute(cmd) + if stdout.strip() == 'free': + return port + raise Exception(_('Unable to find an open port')) + + def get_pty_for_instance(instance_name): + virt_dom = self._conn.lookupByName(instance_name) + xml = virt_dom.XMLDesc(0) + dom = minidom.parseString(xml) + + for serial in dom.getElementsByTagName('serial'): + if serial.getAttribute('type') == 'pty': + source = serial.getElementsByTagName('source')[0] + return source.getAttribute('path') + + port = get_open_port() + token = str(uuid.uuid4()) + host = instance['host'] + + ajaxterm_cmd = 'sudo socat - %s' \ + % get_pty_for_instance(instance['name']) + + cmd = '%s/tools/ajaxterm/ajaxterm.py --command "%s" -t %s -p %s' \ + % (utils.novadir(), ajaxterm_cmd, token, port) + + subprocess.Popen(cmd, shell=True) + return {'token': token, 'host': host, 'port': port} + + def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs): + """Wrapper for a method that creates an image that caches the image. + + This wrapper will save the image into a common store and create a + copy for use by the hypervisor. + + The underlying method should specify a kwarg of target representing + where the image will be saved. + + fname is used as the filename of the base image. The filename needs + to be unique to a given image. + + If cow is True, it will make a CoW image instead of a copy. + """ + if not os.path.exists(target): + base_dir = os.path.join(FLAGS.instances_path, '_base') + if not os.path.exists(base_dir): + os.mkdir(base_dir) + os.chmod(base_dir, 0777) + base = os.path.join(base_dir, fname) + if not os.path.exists(base): + fn(target=base, *args, **kwargs) + if cow: + utils.execute('qemu-img create -f qcow2 -o ' + 'cluster_size=2M,backing_file=%s %s' + % (base, target)) + else: + utils.execute('cp %s %s' % (base, target)) + + def _fetch_image(self, target, image_id, user, project, size=None): + """Grab image and optionally attempt to resize it""" + images.fetch(image_id, target, user, project) + if size: + disk.extend(target, size) + + def _create_local(self, target, local_gb): + """Create a blank image of specified size""" + utils.execute('truncate %s -s %dG' % (target, local_gb)) + # TODO(vish): should we format disk by default? + + def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): # syntactic nicety - basepath = lambda fname = '', prefix = prefix: os.path.join( - FLAGS.instances_path, - inst['name'], - prefix + fname) + def basepath(fname='', suffix=suffix): + return os.path.join(FLAGS.instances_path, + inst['name'], + fname + suffix) # ensure directories exist and are writable - utils.execute('mkdir -p %s' % basepath(prefix='')) - utils.execute('chmod 0777 %s' % basepath(prefix='')) + utils.execute('mkdir -p %s' % basepath(suffix='')) + utils.execute('chmod 0777 %s' % basepath(suffix='')) - # TODO(termie): these are blocking calls, it would be great - # if they weren't. 
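+        # (Editorial note) The _cache_image helper above implements a
+        # copy-on-write image cache: each base image is fetched or generated
+        # once into $instances_path/_base/<fname>, and per-instance disks are
+        # either full copies or, with use_cow_images, qcow2 overlays made by
+        #   qemu-img create -f qcow2 -o cluster_size=2M,backing_file=<base> <target>
+        # so an instance's disk stores only the blocks it has written.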
- logging.info(_('instance %s: Creating image'), inst['name']) + LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() - # NOTE(vish): No need add the prefix to console.log + # NOTE(vish): No need add the suffix to console.log os.close(os.open(basepath('console.log', ''), os.O_CREAT | os.O_WRONLY, 0660)) @@ -447,23 +559,44 @@ class LibvirtConnection(object): disk_images = {'image_id': inst['image_id'], 'kernel_id': inst['kernel_id'], 'ramdisk_id': inst['ramdisk_id']} - if not os.path.exists(basepath('disk')): - images.fetch(inst.image_id, basepath('disk-raw'), user, - project) - - if inst['kernel_id']: - if not os.path.exists(basepath('kernel')): - images.fetch(inst['kernel_id'], basepath('kernel'), - user, project) - if inst['ramdisk_id']: - if not os.path.exists(basepath('ramdisk')): - images.fetch(inst['ramdisk_id'], basepath('ramdisk'), - user, project) - - def execute(cmd, process_input=None, check_exit_code=True): - return utils.execute(cmd=cmd, - process_input=process_input, - check_exit_code=check_exit_code) + + if disk_images['kernel_id']: + self._cache_image(fn=self._fetch_image, + target=basepath('kernel'), + fname=disk_images['kernel_id'], + image_id=disk_images['kernel_id'], + user=user, + project=project) + if disk_images['ramdisk_id']: + self._cache_image(fn=self._fetch_image, + target=basepath('ramdisk'), + fname=disk_images['ramdisk_id'], + image_id=disk_images['ramdisk_id'], + user=user, + project=project) + + root_fname = disk_images['image_id'] + size = FLAGS.minimum_root_size + if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue': + size = None + root_fname += "_sm" + + self._cache_image(fn=self._fetch_image, + target=basepath('disk'), + fname=root_fname, + cow=FLAGS.use_cow_images, + image_id=disk_images['image_id'], + user=user, + project=project, + size=size) + type_data = instance_types.INSTANCE_TYPES[inst['instance_type']] + + if type_data['local_gb']: + self._cache_image(fn=self._create_local, + target=basepath('disk.local'), + fname="local_%s" % type_data['local_gb'], + cow=FLAGS.use_cow_images, + local_gb=type_data['local_gb']) # For now, we assume that if we're not using a kernel, we're using a # partitioned disk image where the target partition is the first @@ -479,55 +612,42 @@ class LibvirtConnection(object): if network_ref['injected']: admin_context = context.get_admin_context() address = db.instance_get_fixed_address(admin_context, inst['id']) + ra_server = network_ref['ra_server'] + if not ra_server: + ra_server = "fd00::" with open(FLAGS.injected_network_template) as f: net = f.read() % {'address': address, 'netmask': network_ref['netmask'], 'gateway': network_ref['gateway'], 'broadcast': network_ref['broadcast'], - 'dns': network_ref['dns']} + 'dns': network_ref['dns'], + 'ra_server': ra_server} if key or net: if key: - logging.info(_('instance %s: injecting key into image %s'), + LOG.info(_('instance %s: injecting key into image %s'), inst['name'], inst.image_id) if net: - logging.info(_('instance %s: injecting net into image %s'), + LOG.info(_('instance %s: injecting net into image %s'), inst['name'], inst.image_id) try: - disk.inject_data(basepath('disk-raw'), key, net, + disk.inject_data(basepath('disk'), key, net, partition=target_partition, - execute=execute) + nbd=FLAGS.use_cow_images) except Exception as e: # This could be a windows image, or a vmdk format disk - logging.warn(_('instance %s: ignoring error injecting data' - ' into image %s (%s)'), - inst['name'], 
inst.image_id, e) - - if inst['kernel_id']: - if os.path.exists(basepath('disk')): - utils.execute('rm -f %s' % basepath('disk')) - - local_bytes = (instance_types.INSTANCE_TYPES[inst.instance_type] - ['local_gb'] - * 1024 * 1024 * 1024) - - resize = True - if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': - resize = False - - if inst['kernel_id']: - disk.partition(basepath('disk-raw'), basepath('disk'), - local_bytes, resize, execute=execute) - else: - os.rename(basepath('disk-raw'), basepath('disk')) - disk.extend(basepath('disk'), local_bytes, execute=execute) + LOG.warn(_('instance %s: ignoring error injecting data' + ' into image %s (%s)'), + inst['name'], inst.image_id, e) if FLAGS.libvirt_type == 'uml': utils.execute('sudo chown root %s' % basepath('disk')) def to_xml(self, instance, rescue=False): # TODO(termie): cache? - logging.debug(_('instance %s: starting toXML method'), - instance['name']) + LOG.debug(_('instance %s: starting toXML method'), instance['name']) + network = db.project_get_network(context.get_admin_context(), + instance['project_id']) + LOG.debug(_('instance %s: starting toXML method'), instance['name']) network = db.network_get_by_instance(context.get_admin_context(), instance['id']) # FIXME(vish): stick this in db @@ -537,15 +657,36 @@ class LibvirtConnection(object): instance['id']) # Assume that the gateway also acts as the dhcp server. dhcp_server = network['gateway'] - + ra_server = network['ra_server'] + if not ra_server: + ra_server = 'fd00::' if FLAGS.allow_project_net_traffic: - net, mask = _get_net_and_mask(network['cidr']) - extra_params = ("<parameter name=\"PROJNET\" " + if FLAGS.use_ipv6: + net, mask = _get_net_and_mask(network['cidr']) + net_v6, prefixlen_v6 = _get_net_and_prefixlen( + network['cidr_v6']) + extra_params = ("<parameter name=\"PROJNET\" " "value=\"%s\" />\n" "<parameter name=\"PROJMASK\" " - "value=\"%s\" />\n") % (net, mask) + "value=\"%s\" />\n" + "<parameter name=\"PROJNETV6\" " + "value=\"%s\" />\n" + "<parameter name=\"PROJMASKV6\" " + "value=\"%s\" />\n") % \ + (net, mask, net_v6, prefixlen_v6) + else: + net, mask = _get_net_and_mask(network['cidr']) + extra_params = ("<parameter name=\"PROJNET\" " + "value=\"%s\" />\n" + "<parameter name=\"PROJMASK\" " + "value=\"%s\" />\n") % \ + (net, mask) else: extra_params = "\n" + if FLAGS.use_cow_images: + driver_type = 'qcow2' + else: + driver_type = 'raw' xml_info = {'type': FLAGS.libvirt_type, 'name': instance['name'], @@ -557,8 +698,11 @@ class LibvirtConnection(object): 'mac_address': instance['mac_address'], 'ip_address': ip_address, 'dhcp_server': dhcp_server, + 'ra_server': ra_server, 'extra_params': extra_params, - 'rescue': rescue} + 'rescue': rescue, + 'local': instance_type['local_gb'], + 'driver_type': driver_type} if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" @@ -569,7 +713,7 @@ class LibvirtConnection(object): xml_info['disk'] = xml_info['basepath'] + "/disk" xml = str(Template(self.libvirt_xml, searchList=[xml_info])) - logging.debug(_('instance %s: finished toXML method'), + LOG.debug(_('instance %s: finished toXML method'), instance['name']) return xml @@ -690,18 +834,67 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_name) return domain.interfaceStats(interface) - def refresh_security_group(self, security_group_id): - fw = NWFilterFirewall(self._conn) - fw.ensure_security_group_filter(security_group_id) + def get_console_pool_info(self, console_type): + #TODO(mdragon): console 
proxy should be implemented for libvirt, + # in case someone wants to use it with kvm or + # such. For now return fake data. + return {'address': '127.0.0.1', + 'username': 'fakeuser', + 'password': 'fakepassword'} + + def refresh_security_group_rules(self, security_group_id): + self.firewall_driver.refresh_security_group_rules(security_group_id) + def refresh_security_group_members(self, security_group_id): + self.firewall_driver.refresh_security_group_members(security_group_id) -class NWFilterFirewall(object): + +class FirewallDriver(object): + def prepare_instance_filter(self, instance): + """Prepare filters for the instance. + + At this point, the instance isn't running yet.""" + raise NotImplementedError() + + def unfilter_instance(self, instance): + """Stop filtering instance""" + raise NotImplementedError() + + def apply_instance_filter(self, instance): + """Apply instance filter. + + Once this method returns, the instance should be firewalled + appropriately. This method should as far as possible be a + no-op. It's vastly preferred to get everything set up in + prepare_instance_filter. + """ + raise NotImplementedError() + + def refresh_security_group_rules(self, security_group_id): + """Refresh security group rules from data store + + Gets called when a rule has been added to or removed from + the security group.""" + raise NotImplementedError() + + def refresh_security_group_members(self, security_group_id): + """Refresh security group members from data store + + Gets called when an instance gets added to or removed from + the security group.""" + raise NotImplementedError() + + +class NWFilterFirewall(FirewallDriver): """ This class implements a network filtering mechanism versatile enough for EC2 style Security Group filtering by leveraging libvirt's nwfilter. First, all instances get a filter ("nova-base-filter") applied. + This filter provides some basic security such as protection against + MAC spoofing, IP spoofing, and ARP spoofing. + This filter drops all incoming ipv4 and ipv6 connections. Outgoing connections are never blocked. @@ -735,39 +928,92 @@ class NWFilterFirewall(object): (*) This sentence brought to you by the redundancy department of redundancy. + """ def __init__(self, get_connection): - self._conn = get_connection - - nova_base_filter = '''<filter name='nova-base' chain='root'> - <uuid>26717364-50cf-42d1-8185-29bf893ab110</uuid> - <filterref filter='no-mac-spoofing'/> - <filterref filter='no-ip-spoofing'/> - <filterref filter='no-arp-spoofing'/> - <filterref filter='allow-dhcp-server'/> - <filterref filter='nova-allow-dhcp-server'/> - <filterref filter='nova-base-ipv4'/> - <filterref filter='nova-base-ipv6'/> - </filter>''' - - nova_dhcp_filter = '''<filter name='nova-allow-dhcp-server' chain='ipv4'> - <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid> - <rule action='accept' direction='out' + self._libvirt_get_connection = get_connection + self.static_filters_configured = False + self.handle_security_groups = False + + def _get_connection(self): + return self._libvirt_get_connection() + _conn = property(_get_connection) + + def nova_dhcp_filter(self): + """The standard allow-dhcp-server filter is an <ip> one, so it uses + ebtables to allow traffic through. 
+
+    def nova_dhcp_filter(self):
+        """The standard allow-dhcp-server filter is an <ip> one, so it uses
+           ebtables to allow traffic through. Without a corresponding rule in
+           iptables, it'll get blocked anyway."""
+
+        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
+                    <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid>
+                    <rule action='accept' direction='out'
+                          priority='100'>
+                      <udp srcipaddr='0.0.0.0'
+                           dstipaddr='255.255.255.255'
+                           srcportstart='68'
+                           dstportstart='67'/>
+                    </rule>
+                    <rule action='accept' direction='in'
+                          priority='100'>
+                      <udp srcipaddr='$DHCPSERVER'
+                           srcportstart='67'
+                           dstportstart='68'/>
+                    </rule>
+                  </filter>'''
+
+    def nova_ra_filter(self):
+        return '''<filter name='nova-allow-ra-server' chain='root'>
+                    <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid>
+                    <rule action='accept' direction='inout'
                           priority='100'>
-                                <udp srcipaddr='0.0.0.0'
-                                     dstipaddr='255.255.255.255'
-                                     srcportstart='68'
-                                     dstportstart='67'/>
-                              </rule>
-                              <rule action='accept' direction='in'
-                                    priority='100'>
-                                <udp srcipaddr='$DHCPSERVER'
-                                     srcportstart='67'
-                                     dstportstart='68'/>
+                      <icmpv6 srcipaddr='$RASERVER'/>
                     </rule>
                   </filter>'''

+    def setup_basic_filtering(self, instance):
+        """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+        logging.info('called setup_basic_filtering in nwfilter')
+
+        if self.handle_security_groups:
+            # No point in setting up a filter set that we'll be overriding
+            # anyway.
+            return
+
+        logging.info('ensuring static filters')
+        self._ensure_static_filters()
+
+        instance_filter_name = self._instance_filter_name(instance)
+        self._define_filter(self._filter_container(instance_filter_name,
+                                                   ['nova-base']))
+
+    def _ensure_static_filters(self):
+        if self.static_filters_configured:
+            return
+
+        self._define_filter(self._filter_container('nova-base',
+                                                   ['no-mac-spoofing',
+                                                    'no-ip-spoofing',
+                                                    'no-arp-spoofing',
+                                                    'allow-dhcp-server']))
+        self._define_filter(self.nova_base_ipv4_filter)
+        self._define_filter(self.nova_base_ipv6_filter)
+        self._define_filter(self.nova_dhcp_filter)
+        self._define_filter(self.nova_ra_filter)
+        self._define_filter(self.nova_vpn_filter)
+        if FLAGS.allow_project_net_traffic:
+            self._define_filter(self.nova_project_filter)
+            if FLAGS.use_ipv6:
+                self._define_filter(self.nova_project_filter_v6)
+
+        self.static_filters_configured = True
+
+    def _filter_container(self, name, filters):
+        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
+                 name,
+                 ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
+        return xml
+
     nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
                            <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
                            <filterref filter='allow-dhcp-server'/>
@@ -780,7 +1026,7 @@ class NWFilterFirewall(object):
         retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
         for protocol in ['tcp', 'udp', 'icmp']:
             for direction, action, priority in [('out', 'accept', 399),
-                                                ('inout', 'drop', 400)]:
+                                                ('in', 'drop', 400)]:
                 retval += """<rule action='%s' direction='%s' priority='%d'>
                                 <%s />
                              </rule>""" % (action, direction,
@@ -790,13 +1036,13 @@ class NWFilterFirewall(object):

     def nova_base_ipv6_filter(self):
         retval = "<filter name='nova-base-ipv6' chain='ipv6'>"
-        for protocol in ['tcp', 'udp', 'icmp']:
+        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
             for direction, action, priority in [('out', 'accept', 399),
-                                                ('inout', 'drop', 400)]:
+                                                ('in', 'drop', 400)]:
                 retval += """<rule action='%s' direction='%s' priority='%d'>
-                                <%s-ipv6 />
+                                <%s />
                              </rule>""" % (action, direction,
-                                          priority, protocol)
+                                           priority, protocol)
         retval += '</filter>'
         return retval

@@ -809,50 +1055,73 @@ class NWFilterFirewall(object):
         retval += '</filter>'
         return retval

+    def nova_project_filter_v6(self):
+        retval = "<filter name='nova-project-v6' chain='ipv6'>"
+        for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']:
+            retval += """<rule action='accept' direction='inout'
+                               priority='200'>
+                           <%s srcipaddr='$PROJNETV6'
+                               srcipmask='$PROJMASKV6' />
+                         </rule>""" % (protocol)
+        retval += '</filter>'
+        return retval
+
     def _define_filter(self, xml):
         if callable(xml):
             xml = xml()
-
         # execute in a native thread and block current greenthread until done
         tpool.execute(self._conn.nwfilterDefineXML, xml)

-    def setup_base_nwfilters(self):
-        self._define_filter(self.nova_base_ipv4_filter)
-        self._define_filter(self.nova_base_ipv6_filter)
-        self._define_filter(self.nova_dhcp_filter)
-        self._define_filter(self.nova_base_filter)
-        self._define_filter(self.nova_vpn_filter)
-        if FLAGS.allow_project_net_traffic:
-            self._define_filter(self.nova_project_filter)
+    def unfilter_instance(self, instance):
+        # Nothing to do
+        pass

-    def setup_nwfilters_for_instance(self, instance):
+    def prepare_instance_filter(self, instance):
         """
         Creates an NWFilter for the given instance. In the process,
         it makes sure the filters for the security groups as well as
         the base filter are all in place.
         """
-
-        nwfilter_xml = ("<filter name='nova-instance-%s' "
-                        "chain='root'>\n") % instance['name']
-
         if instance['image_id'] == FLAGS.vpn_image_id:
-            nwfilter_xml += "    <filterref filter='nova-vpn' />\n"
+            base_filter = 'nova-vpn'
         else:
-            nwfilter_xml += "    <filterref filter='nova-base' />\n"
+            base_filter = 'nova-base'
+
+        instance_filter_name = self._instance_filter_name(instance)
+        instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,)
+        instance_filter_children = [base_filter, instance_secgroup_filter_name]
+        instance_secgroup_filter_children = ['nova-base-ipv4',
+                                             'nova-base-ipv6',
+                                             'nova-allow-dhcp-server']
+        if FLAGS.use_ipv6:
+            instance_secgroup_filter_children += ['nova-allow-ra-server']
+
+        ctxt = context.get_admin_context()

         if FLAGS.allow_project_net_traffic:
-            nwfilter_xml += "    <filterref filter='nova-project' />\n"
+            instance_filter_children += ['nova-project']
+            if FLAGS.use_ipv6:
+                instance_filter_children += ['nova-project-v6']
+
+        for security_group in db.security_group_get_by_instance(ctxt,
+                                                               instance['id']):

-        for security_group in instance.security_groups:
-            self.ensure_security_group_filter(security_group['id'])
+            self.refresh_security_group_rules(security_group['id'])

-            nwfilter_xml += ("    <filterref filter='nova-secgroup-%d' "
-                             "/>\n") % security_group['id']
-        nwfilter_xml += "</filter>"
+            instance_secgroup_filter_children += [('nova-secgroup-%s' %
+                                                   security_group['id'])]

-        self._define_filter(nwfilter_xml)
+        self._define_filter(
+            self._filter_container(instance_secgroup_filter_name,
+                                   instance_secgroup_filter_children))

-    def ensure_security_group_filter(self, security_group_id):
+        self._define_filter(
+            self._filter_container(instance_filter_name,
+                                   instance_filter_children))
+
+        return
+
+    def refresh_security_group_rules(self, security_group_id):
         return self._define_filter(
             self.security_group_to_nwfilter_xml(security_group_id))

@@ -860,19 +1129,26 @@ class NWFilterFirewall(object):
         security_group = db.security_group_get(context.get_admin_context(),
                                                security_group_id)
         rule_xml = ""
+        v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
        for rule in security_group.rules:
             rule_xml += "<rule action='accept' direction='in' priority='300'>"
             if rule.cidr:
-                net, mask = _get_net_and_mask(rule.cidr)
-                rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
-                            (rule.protocol, net, mask)
+                version = _get_ip_version(rule.cidr)
+                if(FLAGS.use_ipv6 and version == 6):
+                    net, prefixlen = _get_net_and_prefixlen(rule.cidr)
+                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+                                (v6protocol[rule.protocol], net, prefixlen)
+                else:
+                    net, mask = _get_net_and_mask(rule.cidr)
+                    rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+                                (rule.protocol, net, mask)
             if rule.protocol in ['tcp', 'udp']:
                 rule_xml += "dstportstart='%s' dstportend='%s' " % \
                             (rule.from_port, rule.to_port)
             elif rule.protocol == 'icmp':
-                logging.info('rule.protocol: %r, rule.from_port: %r, '
-                             'rule.to_port: %r' %
-                             (rule.protocol, rule.from_port, rule.to_port))
+                LOG.info('rule.protocol: %r, rule.from_port: %r, '
+                         'rule.to_port: %r', rule.protocol,
+                         rule.from_port, rule.to_port)
                 if rule.from_port != -1:
                     rule_xml += "type='%s' " % rule.from_port
                 if rule.to_port != -1:
@@ -880,6 +1156,214 @@ class NWFilterFirewall(object):
             rule_xml += '/>\n'
             rule_xml += "</rule>\n"
-        xml = "<filter name='nova-secgroup-%s' chain='ipv4'>%s</filter>" % \
-              (security_group_id, rule_xml,)
+        xml = "<filter name='nova-secgroup-%s' " % security_group_id
+        if(FLAGS.use_ipv6):
+            xml += "chain='root'>%s</filter>" % rule_xml
+        else:
+            xml += "chain='ipv4'>%s</filter>" % rule_xml
         return xml
+
+    def _instance_filter_name(self, instance):
+        return 'nova-instance-%s' % instance['name']
+
+
+class IptablesFirewallDriver(FirewallDriver):
+    def __init__(self, execute=None):
+        self.execute = execute or utils.execute
+        self.instances = {}
+
+    def apply_instance_filter(self, instance):
+        """No-op. Everything is done in prepare_instance_filter"""
+        pass
+
+    def remove_instance(self, instance):
+        if instance['id'] in self.instances:
+            del self.instances[instance['id']]
+        else:
+            LOG.info(_('Attempted to unfilter instance %s which is not '
+                       'filtered'), instance['id'])
+
+    def add_instance(self, instance):
+        self.instances[instance['id']] = instance
+
+    def unfilter_instance(self, instance):
+        self.remove_instance(instance)
+        self.apply_ruleset()
+
+    def prepare_instance_filter(self, instance):
+        self.add_instance(instance)
+        self.apply_ruleset()
+
+    def apply_ruleset(self):
+        current_filter, _ = self.execute('sudo iptables-save -t filter')
+        current_lines = current_filter.split('\n')
+        new_filter = self.modify_rules(current_lines, 4)
+        self.execute('sudo iptables-restore',
+                     process_input='\n'.join(new_filter))
+        if(FLAGS.use_ipv6):
+            current_filter, _ = self.execute('sudo ip6tables-save -t filter')
+            current_lines = current_filter.split('\n')
+            new_filter = self.modify_rules(current_lines, 6)
+            self.execute('sudo ip6tables-restore',
+                         process_input='\n'.join(new_filter))
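apply_ruleset snapshots the kernel's filter table with iptables-save, rewrites it in memory, and pushes the whole table back through iptables-restore so the swap happens in one shot. The same round-trip in a minimal, hedged form (plain subprocess instead of nova's utils.execute, and no sudo handling):

    import subprocess

    def rewrite_filter_table(rewrite):
        # rewrite() receives the saved lines and returns the new ruleset.
        saved = subprocess.check_output(['iptables-save', '-t', 'filter'])
        new_lines = rewrite(saved.split('\n'))
        proc = subprocess.Popen(['iptables-restore'], stdin=subprocess.PIPE)
        proc.communicate('\n'.join(new_lines))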
+
+    def modify_rules(self, current_lines, ip_version=4):
+        ctxt = context.get_admin_context()
+        # Remove any trace of nova rules.
+        new_filter = filter(lambda l: 'nova-' not in l, current_lines)
+
+        seen_chains = False
+        for rules_index in range(len(new_filter)):
+            if not seen_chains:
+                if new_filter[rules_index].startswith(':'):
+                    seen_chains = True
+            elif seen_chains == 1:
+                if not new_filter[rules_index].startswith(':'):
+                    break
+
+        our_chains = [':nova-fallback - [0:0]']
+        our_rules = ['-A nova-fallback -j DROP']
+
+        our_chains += [':nova-local - [0:0]']
+        our_rules += ['-A FORWARD -j nova-local']
+
+        security_groups = {}
+        # Add our chains
+        # First, we add instance chains and rules
+        for instance_id in self.instances:
+            instance = self.instances[instance_id]
+            chain_name = self._instance_chain_name(instance)
+            if(ip_version == 4):
+                ip_address = self._ip_for_instance(instance)
+            elif(ip_version == 6):
+                ip_address = self._ip_for_instance_v6(instance)
+
+            our_chains += [':%s - [0:0]' % chain_name]
+
+            # Jump to the per-instance chain
+            our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
+                                                         chain_name)]
+
+            # Always drop invalid packets
+            our_rules += ['-A %s -m state --state '
+                          'INVALID -j DROP' % (chain_name,)]
+
+            # Allow established connections
+            our_rules += ['-A %s -m state --state '
+                          'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
+
+            # Jump to each security group chain in turn
+            for security_group in \
+                    db.security_group_get_by_instance(ctxt,
+                                                      instance['id']):
+                security_groups[security_group['id']] = security_group
+
+                sg_chain_name = self._security_group_chain_name(
+                                                       security_group['id'])
+
+                our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
+
+            if(ip_version == 4):
+                # Allow DHCP responses
+                dhcp_server = self._dhcp_server_for_instance(instance)
+                our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68' %
+                              (chain_name, dhcp_server)]
+            elif(ip_version == 6):
+                # Allow RA responses
+                ra_server = self._ra_server_for_instance(instance)
+                our_rules += ['-A %s -s %s -p icmpv6' %
+                              (chain_name, ra_server)]
+
+            # If nothing matches, jump to the fallback chain
+            our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
+
+        # then, security group chains and rules
+        for security_group_id in security_groups:
+            chain_name = self._security_group_chain_name(security_group_id)
+            our_chains += [':%s - [0:0]' % chain_name]
+
+            rules = \
+                db.security_group_rule_get_by_security_group(ctxt,
+                                                          security_group_id)
+
+            for rule in rules:
+                logging.info('%r', rule)
+
+                if not rule.cidr:
+                    # Eventually, a mechanism to grant access for security
+                    # groups will turn up here. It'll use ipsets.
+                    continue
+
+                version = _get_ip_version(rule.cidr)
+                if version != ip_version:
+                    continue
+
+                protocol = rule.protocol
+                if version == 6 and rule.protocol == 'icmp':
+                    protocol = 'icmpv6'
+
+                args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
+
+                if rule.protocol in ['udp', 'tcp']:
+                    if rule.from_port == rule.to_port:
+                        args += ['--dport', '%s' % (rule.from_port,)]
+                    else:
+                        args += ['-m', 'multiport',
+                                 '--dports', '%s:%s' % (rule.from_port,
+                                                        rule.to_port)]
+                elif rule.protocol == 'icmp':
+                    icmp_type = rule.from_port
+                    icmp_code = rule.to_port
+
+                    if icmp_type == -1:
+                        icmp_type_arg = None
+                    else:
+                        icmp_type_arg = '%s' % icmp_type
+                        if not icmp_code == -1:
+                            icmp_type_arg += '/%s' % icmp_code
+
+                    if icmp_type_arg:
+                        if(ip_version == 4):
+                            args += ['-m', 'icmp', '--icmp-type',
+                                     icmp_type_arg]
+                        elif(ip_version == 6):
+                            args += ['-m', 'icmp6', '--icmpv6-type',
+                                     icmp_type_arg]
+
+                args += ['-j ACCEPT']
+                our_rules += [' '.join(args)]
+
+        new_filter[rules_index:rules_index] = our_rules
+        new_filter[rules_index:rules_index] = our_chains
+        logging.info('new_filter: %s', '\n'.join(new_filter))
+        return new_filter
+
+    def refresh_security_group_members(self, security_group):
+        pass
+
+    def refresh_security_group_rules(self, security_group):
+        self.apply_ruleset()
+
+    def _security_group_chain_name(self, security_group_id):
+        return 'nova-sg-%s' % (security_group_id,)
+
+    def _instance_chain_name(self, instance):
+        return 'nova-inst-%s' % (instance['id'],)
+
+    def _ip_for_instance(self, instance):
+        return db.instance_get_fixed_address(context.get_admin_context(),
+                                             instance['id'])
+
+    def _ip_for_instance_v6(self, instance):
+        return db.instance_get_fixed_address_v6(context.get_admin_context(),
+                                                instance['id'])
+
+    def _dhcp_server_for_instance(self, instance):
+        network = db.project_get_network(context.get_admin_context(),
+                                         instance['project_id'])
+        return network['gateway']
+
+    def _ra_server_for_instance(self, instance):
+        network = db.project_get_network(context.get_admin_context(),
+                                         instance['project_id'])
+        return network['ra_server']
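Taken together, modify_rules produces a layered chain layout: FORWARD jumps to nova-local, which dispatches to per-instance chains, which consult per-security-group chains before falling through to nova-fallback. For one instance in one group, the spliced-in lines would look roughly like this (instance id, group id, and addresses are invented for illustration):

    :nova-fallback - [0:0]
    :nova-local - [0:0]
    :nova-inst-7 - [0:0]
    :nova-sg-3 - [0:0]
    -A nova-fallback -j DROP
    -A FORWARD -j nova-local
    -A nova-local -d 10.0.0.5 -j nova-inst-7
    -A nova-inst-7 -m state --state INVALID -j DROP
    -A nova-inst-7 -m state --state ESTABLISHED,RELATED -j ACCEPT
    -A nova-inst-7 -j nova-sg-3
    -A nova-inst-7 -s 10.0.0.1 -p udp --sport 67 --dport 68
    -A nova-inst-7 -j nova-fallback
    -A nova-sg-3 -p tcp -s 0.0.0.0/0 --dport 22 -j ACCEPT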
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index aa4026f97..4bfaf4b57 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -52,12 +52,12 @@ A fake XenAPI SDK.

 import datetime
-import logging
 import uuid
 from pprint import pformat

 from nova import exception
+from nova import log as logging


 _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
@@ -65,15 +65,18 @@ _CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\

 _db_content = {}

+LOG = logging.getLogger("nova.virt.xenapi.fake")
+

 def log_db_contents(msg=None):
-    logging.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
+    LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))


 def reset():
     for c in _CLASSES:
         _db_content[c] = {}
     create_host('fake')
+    create_vm('fake', 'Running', is_a_template=False, is_control_domain=True)


 def create_host(name_label):
@@ -134,14 +137,21 @@ def create_vdi(name_label, read_only, sr_ref, sharable):


 def create_vbd(vm_ref, vdi_ref):
-    vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref}
+    vbd_rec = {
+        'VM': vm_ref,
+        'VDI': vdi_ref,
+        'currently_attached': False,
+    }
     vbd_ref = _create_object('VBD', vbd_rec)
     after_VBD_create(vbd_ref, vbd_rec)
     return vbd_ref


 def after_VBD_create(vbd_ref, vbd_rec):
-    """Create backref from VM to VBD when VBD is created"""
+    """Create read-only fields and backref from VM to VBD when VBD is
+    created."""
+    vbd_rec['currently_attached'] = False
+    vbd_rec['device'] = ''
     vm_ref = vbd_rec['VM']
     vm_rec = _db_content['VM'][vm_ref]
     vm_rec['VBDs'] = [vbd_ref]
@@ -150,9 +160,10 @@ def after_VBD_create(vbd_ref, vbd_rec):
         vbd_rec['vm_name_label'] = vm_name_label


-def create_pbd(config, sr_ref, attached):
+def create_pbd(config, host_ref, sr_ref, attached):
     return _create_object('PBD', {
         'device-config': config,
+        'host': host_ref,
         'SR': sr_ref,
         'currently-attached': attached,
     })
@@ -165,6 +176,33 @@ def create_task(name_label):
     })


+def create_local_srs():
+    """Create an SR that looks like the one created on the local disk by
+    default by the XenServer installer. Do this one per host."""
+    for host_ref in _db_content['host'].keys():
+        _create_local_sr(host_ref)
+
+
+def _create_local_sr(host_ref):
+    sr_ref = _create_object('SR', {
+        'name_label': 'Local storage',
+        'type': 'lvm',
+        'content_type': 'user',
+        'shared': False,
+        'physical_size': str(1 << 30),
+        'physical_utilisation': str(0),
+        'virtual_allocation': str(0),
+        'other_config': {
+            'i18n-original-value-name_label': 'Local storage',
+            'i18n-key': 'local-storage',
+        },
+        'VDIs': []
+    })
+    pbd_ref = create_pbd('', host_ref, sr_ref, True)
+    _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
+    return sr_ref
+
+
 def _create_object(table, obj):
     ref = str(uuid.uuid4())
     obj['uuid'] = str(uuid.uuid4())
@@ -177,9 +215,10 @@ def _create_sr(table, obj):
     # Forces fake to support iscsi only
     if sr_type != 'iscsi':
         raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
+    host_ref = _db_content['host'].keys()[0]
     sr_ref = _create_object(table, obj[2])
     vdi_ref = create_vdi('', False, sr_ref, False)
-    pbd_ref = create_pbd('', sr_ref, True)
+    pbd_ref = create_pbd('', host_ref, sr_ref, True)
     _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
     _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
     _db_content['VDI'][vdi_ref]['SR'] = sr_ref
@@ -231,6 +270,20 @@ class SessionBase(object):
     def __init__(self, uri):
         self._session = None

+    def VBD_plug(self, _1, ref):
+        rec = get_record('VBD', ref)
+        if rec['currently_attached']:
+            raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
+        rec['currently_attached'] = True
+        rec['device'] = rec['userdevice']
+
+    def VBD_unplug(self, _1, ref):
+        rec = get_record('VBD', ref)
+        if not rec['currently_attached']:
+            raise Failure(['DEVICE_ALREADY_DETACHED', ref])
+        rec['currently_attached'] = False
+        rec['device'] = ''
+
     def xenapi_request(self, methodname, params):
         if methodname.startswith('login'):
             self._login(methodname, params)
@@ -242,9 +295,9 @@ class SessionBase(object):
             full_params = (self._session,) + params
             meth = getattr(self, methodname, None)
             if meth is None:
-                logging.warn('Raising NotImplemented')
+                LOG.debug(_('Raising NotImplemented'))
                 raise NotImplementedError(
-                    'xenapi.fake does not have an implementation for %s' %
+                    _('xenapi.fake does not have an implementation for %s') %
                     methodname)
             return meth(*full_params)

@@ -278,15 +331,17 @@ class SessionBase(object):

         if impl is not None:
             def callit(*params):
-                logging.warn('Calling %s %s', name, impl)
+                LOG.debug(_('Calling %s %s'), name, impl)
                 self._check_session(params)
                 return impl(*params)
             return callit
         if self._is_gettersetter(name, True):
-            logging.warn('Calling getter %s', name)
+            LOG.debug(_('Calling getter %s'), name)
             return lambda *params: self._getter(name, params)
         elif self._is_create(name):
             return lambda *params: self._create(name, params)
+        elif self._is_destroy(name):
+            return lambda *params: self._destroy(name, params)
         else:
             return None

@@ -297,10 +352,16 @@ class SessionBase(object):
                 bits[1].startswith(getter and 'get_' or 'set_'))

     def _is_create(self, name):
+        return self._is_method(name, 'create')
+
+    def _is_destroy(self, name):
+        return self._is_method(name, 'destroy')
+
+    def _is_method(self, name, meth):
         bits = name.split('.')
         return (len(bits) == 2 and
                 bits[0] in _CLASSES and
-                bits[1] == 'create')
+                bits[1] == meth)

     def _getter(self, name, params):
         self._check_session(params)
@@ -333,10 +394,10 @@ class SessionBase(object):
                 field in _db_content[cls][ref]):
                 return _db_content[cls][ref][field]

-        logging.error('Raising NotImplemented')
+        LOG.debug(_('Raising NotImplemented'))
         raise NotImplementedError(
-            'xenapi.fake does not have an implementation for %s or it has '
-            'been called with the wrong number of arguments' % name)
+            _('xenapi.fake does not have an implementation for %s or it has '
+              'been called with the wrong number of arguments') % name)

     def _setter(self, name, params):
         self._check_session(params)
@@ -351,7 +412,7 @@ class SessionBase(object):
             field in _db_content[cls][ref]):
             _db_content[cls][ref][field] = val

-        logging.warn('Raising NotImplemented')
+        LOG.debug(_('Raising NotImplemented'))
         raise NotImplementedError(
             'xenapi.fake does not have an implementation for %s or it has '
             'been called with the wrong number of arguments or the database '
@@ -368,10 +429,9 @@ class SessionBase(object):
             _create_sr(cls, params) or _create_object(cls, params[1])

         # Call hook to provide any fixups needed (ex. creating backrefs)
-        try:
-            globals()["after_%s_create" % cls](ref, params[1])
-        except KeyError:
-            pass
+        after_hook = 'after_%s_create' % cls
+        if after_hook in globals():
+            globals()[after_hook](ref, params[1])

         obj = get_record(cls, ref)

@@ -381,6 +441,15 @@ class SessionBase(object):

         return ref

+    def _destroy(self, name, params):
+        self._check_session(params)
+        self._check_arg_count(params, 2)
+        table, _ = name.split('.')
+        ref = params[1]
+        if ref not in _db_content[table]:
+            raise Failure(['HANDLE_INVALID', table, ref])
+        del _db_content[table][ref]
+
     def _async(self, name, params):
         task_ref = create_task(name)
         task = _db_content['task'][task_ref]
@@ -399,7 +468,7 @@ class SessionBase(object):
             self._session not in _db_content['session']):
             raise Failure(['HANDLE_INVALID', 'session', self._session])
         if len(params) == 0 or params[0] != self._session:
-            logging.warn('Raising NotImplemented')
+            LOG.debug(_('Raising NotImplemented'))
             raise NotImplementedError('Call to XenAPI without using .xenapi')

     def _check_arg_count(self, params, expected):
@@ -418,7 +487,7 @@ class SessionBase(object):
             try:
                 return result[0]
             except IndexError:
-                return None
+                raise Failure(['UUID_INVALID', v, result, recs, k])

         return result
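The fake resolves names like `VBD.create` or `SR.destroy` structurally: split on the dot, check the class list, and dispatch on the method suffix, which is what the `_is_method` refactoring above generalizes. Stripped to its core (a stand-alone sketch, not the fake's actual classes):

    _CLASSES = ['VM', 'VBD', 'SR']

    def lookup_handler(name, handlers):
        # handlers maps a method suffix ('create', 'destroy') to a callable.
        bits = name.split('.')
        if len(bits) == 2 and bits[0] in _CLASSES:
            return handlers.get(bits[1])
        return None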
""" -import logging +import os import pickle +import re import urllib from xml.dom import minidom from eventlet import event +import glance.client from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types @@ -37,6 +40,7 @@ from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.virt.xenapi.vm_utils") XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -46,17 +50,23 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE +KERNEL_DIR = '/boot/guest' + + class ImageType: - """ - Enumeration class for distinguishing different image types - 0 - kernel/ramdisk image (goes on dom0's filesystem) - 1 - disk image (local SR, partitioned by objectstore plugin) - 2 - raw disk image (local SR, NOT partitioned by plugin) - """ + """ + Enumeration class for distinguishing different image types + 0 - kernel/ramdisk image (goes on dom0's filesystem) + 1 - disk image (local SR, partitioned by objectstore plugin) + 2 - raw disk image (local SR, NOT partitioned by plugin) + """ - KERNEL_RAMDISK = 0 - DISK = 1 - DISK_RAW = 2 + KERNEL_RAMDISK = 0 + DISK = 1 + DISK_RAW = 2 class VMHelper(HelperBase): @@ -121,9 +131,9 @@ class VMHelper(HelperBase): rec['HVM_boot_params'] = {'order': 'dc'} rec['platform'] = {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': 'true'} - logging.debug('Created VM %s...', instance.name) + LOG.debug(_('Created VM %s...'), instance.name) vm_ref = session.call_xenapi('VM.create', rec) - logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref) + LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref) return vm_ref @classmethod @@ -143,10 +153,9 @@ class VMHelper(HelperBase): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - logging.debug(_('Creating VBD for VM %s, VDI %s ... '), - vm_ref, vdi_ref) + LOG.debug(_('Creating VBD for VM %s, VDI %s ... 
'), vm_ref, vdi_ref) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) - logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, + LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, vdi_ref) return vbd_ref @@ -161,7 +170,7 @@ class VMHelper(HelperBase): if vbd_rec['userdevice'] == str(number): return vbd except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('VBD not found in instance %s') % vm_ref) @classmethod @@ -170,7 +179,7 @@ class VMHelper(HelperBase): try: vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': raise StorageError(_('Unable to unplug VBD %s') % vbd_ref) @@ -183,7 +192,7 @@ class VMHelper(HelperBase): #with Josh Kearney session.wait_for_task(0, task) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to destroy VBD %s') % vbd_ref) @classmethod @@ -199,22 +208,40 @@ class VMHelper(HelperBase): vif_rec['other_config'] = {} vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} - logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, - network_ref) + LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, + network_ref) vif_ref = session.call_xenapi('VIF.create', vif_rec) - logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, - vm_ref, network_ref) + LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, + vm_ref, network_ref) return vif_ref @classmethod + def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): + """Create a VDI record and returns its reference.""" + vdi_ref = session.get_xenapi().VDI.create( + {'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': []}) + LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, + name_label, virtual_size, read_only, sr_ref) + return vdi_ref + + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, Snapshot VHD """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - logging.debug(_("Snapshotting VM %s with label '%s'..."), - vm_ref, label) + LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] @@ -227,8 +254,8 @@ class VMHelper(HelperBase): template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] - logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, - vm_ref) + LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, + vm_ref) parent_uuid = wait_for_vhd_coalesce( session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) @@ -237,15 +264,15 @@ class VMHelper(HelperBase): return template_vm_ref, [template_vdi_uuid, parent_uuid] @classmethod - def upload_image(cls, session, instance_id, vdi_uuids, image_name): + def upload_image(cls, session, instance_id, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. 
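A hedged example of exercising the new create_vdi helper; `session` and `sr_ref` are assumed to come from an active XenAPI session, and the label and size are arbitrary:

    ONE_GIB = 1 << 30
    vdi_ref = VMHelper.create_vdi(session, sr_ref, 'scratch-disk',
                                  ONE_GIB, False)
    # The returned opaque ref can then be attached to a VM via create_vbd().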
""" - logging.debug(_("Asking xapi to upload %s as '%s'"), - vdi_uuids, image_name) + logging.debug(_("Asking xapi to upload %s as ID %s"), + vdi_uuids, image_id) params = {'vdi_uuids': vdi_uuids, - 'image_name': image_name, + 'image_id': image_id, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port} @@ -257,15 +284,71 @@ class VMHelper(HelperBase): def fetch_image(cls, session, instance_id, image, user, project, type): """ type is interpreted as an ImageType instance + Related flags: + xenapi_image_service = ['glance', 'objectstore'] + glance_address = 'address for glance services' + glance_port = 'port for glance services' """ - url = images.image_url(image) access = AuthManager().get_access_key(user, project) - logging.debug("Asking xapi to fetch %s as %s", url, access) + + if FLAGS.xenapi_image_service == 'glance': + return cls._fetch_image_glance(session, instance_id, image, + access, type) + else: + return cls._fetch_image_objectstore(session, instance_id, image, + access, user.secret, type) + + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, type): + sr = find_sr(session) + if sr is None: + raise exception.NotFound('Cannot find SR to write VDI to') + + c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + + meta, image_file = c.get_image(image) + virtual_size = int(meta['size']) + vdi_size = virtual_size + LOG.debug(_("Size for image %s:%d"), image, virtual_size) + if type == ImageType.DISK: + # Make room for MBR. + vdi_size += MBR_SIZE_BYTES + + vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, + vdi_size, False) + + with_vdi_attached_here(session, vdi, False, + lambda dev: + _stream_disk(dev, type, + virtual_size, image_file)) + if (type == ImageType.KERNEL_RAMDISK): + #we need to invoke a plugin for copying VDI's + #content into proper path + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) + fn = "copy_kernel_vdi" + args = {} + args['vdi-ref'] = vdi + #let the plugin copy the correct number of bytes + args['image-size'] = str(vdi_size) + task = session.async_call_plugin('glance', fn, args) + filename = session.wait_for_task(instance_id, task) + #remove the VDI as it is not needed anymore + session.get_xenapi().VDI.destroy(vdi) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) + return filename + else: + return session.get_xenapi().VDI.get_uuid(vdi) + + @classmethod + def _fetch_image_objectstore(cls, session, instance_id, image, access, + secret, type): + url = images.image_url(image) + LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url args['username'] = access - args['password'] = user.secret + args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' if type != ImageType.KERNEL_RAMDISK: @@ -277,22 +360,46 @@ class VMHelper(HelperBase): return uuid @classmethod - def lookup_image(cls, session, vdi_ref): - logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + def lookup_image(cls, session, instance_id, vdi_ref): + if FLAGS.xenapi_image_service == 'glance': + return cls._lookup_image_glance(session, vdi_ref) + else: + return cls._lookup_image_objectstore(session, instance_id, vdi_ref) + + @classmethod + def _lookup_image_objectstore(cls, session, instance_id, vdi_ref): + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref - #TODO: Call proper function in plugin task = 
session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(task) + pv_str = session.wait_for_task(instance_id, task) + pv = None if pv_str.lower() == 'true': pv = True elif pv_str.lower() == 'false': pv = False - logging.debug("PV Kernel in VDI:%d", pv) + LOG.debug(_("PV Kernel in VDI:%d"), pv) return pv @classmethod + def _lookup_image_glance(cls, session, vdi_ref): + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) + + def is_vdi_pv(dev): + LOG.debug(_("Running pygrub against %s"), dev) + output = os.popen('pygrub -qn /dev/%s' % dev) + for line in output.readlines(): + #try to find kernel string + m = re.search('(?<=kernel:)/.*(?:>)', line) + if m and m.group(0).find('xen') != -1: + LOG.debug(_("Found Xen kernel %s") % m.group(0)) + return True + LOG.debug(_("No Xen kernel found. Booting HVM.")) + return False + return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) + + @classmethod def lookup(cls, session, i): """Look the instance i up, and returns it if available""" vms = session.get_xenapi().VM.get_by_name_label(i) @@ -317,10 +424,9 @@ class VMHelper(HelperBase): vdi = session.get_xenapi().VBD.get_VDI(vbd) # Test valid VDI record = session.get_xenapi().VDI.get_record(vdi) - logging.debug(_('VDI %s is still available'), - record['uuid']) + LOG.debug(_('VDI %s is still available'), record['uuid']) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) else: vdis.append(vdi) if len(vdis) > 0: @@ -331,10 +437,10 @@ class VMHelper(HelperBase): @classmethod def compile_info(cls, record): """Fill record with VM status information""" - logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"), - record['power_state']) - logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"), - XENAPI_POWER_STATE[record['power_state']]) + LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"), + record['power_state']) + LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"), + XENAPI_POWER_STATE[record['power_state']]) return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, @@ -360,7 +466,9 @@ class VMHelper(HelperBase): if i >= 3 and i <= 11: ref = node.childNodes # Name and Value - diags[ref[0].firstChild.data] = ref[6].firstChild.data + if len(ref) > 6: + diags[ref[0].firstChild.data] = \ + ref[6].firstChild.data return diags except cls.XenAPI.Failure as e: return {"Unable to retrieve diagnostics": e} @@ -388,11 +496,9 @@ def get_vhd_parent(session, vdi_rec): """ if 'vhd-parent' in vdi_rec['sm_config']: parent_uuid = vdi_rec['sm_config']['vhd-parent'] - #NOTE(sirp): changed xenapi -> get_xenapi() parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) parent_rec = session.get_xenapi().VDI.get_record(parent_ref) - #NOTE(sirp): changed log -> logging - logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) + LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) return parent_ref, parent_rec else: return None @@ -409,7 +515,7 @@ def get_vhd_parent_uuid(session, vdi_ref): def scan_sr(session, instance_id, sr_ref): - logging.debug(_("Re-scanning SR %s"), sr_ref) + LOG.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) session.wait_for_task(instance_id, task) @@ -427,24 +533,29 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, * parent_vhd snapshot """ - #TODO(sirp): we need to timeout this req after a while + max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts + 
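The coalesce-polling rewrite just below trades a hand-rolled eventlet Event for nova's LoopingCall idiom: the polled function raises `utils.LoopingCallDone(value)` to stop the loop, and `loop.wait()` hands the value back to the caller. The pattern in miniature (the five-second interval and the readiness check are placeholders):

    from nova import utils

    def poll_until_ready(is_ready):
        def _poll():
            value = is_ready()
            if value:
                raise utils.LoopingCallDone(value)  # breaks out of the loop
        loop = utils.LoopingCall(_poll)
        loop.start(5.0, now=True)
        return loop.wait()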
@@ -427,24 +533,29 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
         * parent_vhd
             snapshot
     """
-    #TODO(sirp): we need to timeout this req after a while
+    max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
+    attempts = {'counter': 0}

     def _poll_vhds():
+        attempts['counter'] += 1
+        if attempts['counter'] > max_attempts:
+            msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
+                   % (attempts['counter'], max_attempts))
+            raise exception.Error(msg)
+
         scan_sr(session, instance_id, sr_ref)
         parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
         if original_parent_uuid and (parent_uuid != original_parent_uuid):
-            logging.debug(
-                _("Parent %s doesn't match original parent %s, "
-                  "waiting for coalesce..."),
-                parent_uuid, original_parent_uuid)
+            LOG.debug(_("Parent %s doesn't match original parent %s, "
+                        "waiting for coalesce..."), parent_uuid,
+                      original_parent_uuid)
         else:
-            done.send(parent_uuid)
+            # Breakout of the loop (normally) and return the parent_uuid
+            raise utils.LoopingCallDone(parent_uuid)

-    done = event.Event()
     loop = utils.LoopingCall(_poll_vhds)
     loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True)
-    parent_uuid = done.wait()
-    loop.stop()
+    parent_uuid = loop.wait()

     return parent_uuid

@@ -461,3 +572,123 @@ def get_vdi_for_vm_safely(session, vm_ref):
     vdi_ref = vdi_refs[0]
     vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
     return vdi_ref, vdi_rec
+
+
+def find_sr(session):
+    host = session.get_xenapi_host()
+    srs = session.get_xenapi().SR.get_all()
+    for sr in srs:
+        sr_rec = session.get_xenapi().SR.get_record(sr)
+        if not ('i18n-key' in sr_rec['other_config'] and
+                sr_rec['other_config']['i18n-key'] == 'local-storage'):
+            continue
+        for pbd in sr_rec['PBDs']:
+            pbd_rec = session.get_xenapi().PBD.get_record(pbd)
+            if pbd_rec['host'] == host:
+                return sr
+    return None
+
+
+def with_vdi_attached_here(session, vdi, read_only, f):
+    this_vm_ref = get_this_vm_ref(session)
+    vbd_rec = {}
+    vbd_rec['VM'] = this_vm_ref
+    vbd_rec['VDI'] = vdi
+    vbd_rec['userdevice'] = 'autodetect'
+    vbd_rec['bootable'] = False
+    vbd_rec['mode'] = read_only and 'RO' or 'RW'
+    vbd_rec['type'] = 'disk'
+    vbd_rec['unpluggable'] = True
+    vbd_rec['empty'] = False
+    vbd_rec['other_config'] = {}
+    vbd_rec['qos_algorithm_type'] = ''
+    vbd_rec['qos_algorithm_params'] = {}
+    vbd_rec['qos_supported_algorithms'] = []
+    LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
+    vbd = session.get_xenapi().VBD.create(vbd_rec)
+    LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
+    try:
+        LOG.debug(_('Plugging VBD %s ... '), vbd)
+        session.get_xenapi().VBD.plug(vbd)
+        LOG.debug(_('Plugging VBD %s done.'), vbd)
+        return f(session.get_xenapi().VBD.get_device(vbd))
+    finally:
+        LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
+        vbd_unplug_with_retry(session, vbd)
+        ignore_failure(session.get_xenapi().VBD.destroy, vbd)
+        LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
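with_vdi_attached_here is a context manager in function clothing: create a VBD on the dom0-side VM, plug it, hand the device name to the callback, and always unplug and destroy in the finally block. A hedged usage sketch (`session` and `vdi_ref` assumed available from the surrounding code):

    def read_first_sector(dev):
        with open('/dev/%s' % dev, 'rb') as f:
            return f.read(512)

    mbr = with_vdi_attached_here(session, vdi_ref, True, read_first_sector)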
+
+
+def vbd_unplug_with_retry(session, vbd):
+    """Call VBD.unplug on the given VBD, with a retry if we get
+    DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
+    seeing the device still in use, even when all processes using the device
+    should be dead."""
+    while True:
+        try:
+            session.get_xenapi().VBD.unplug(vbd)
+            LOG.debug(_('VBD.unplug successful first time.'))
+            return
+        except VMHelper.XenAPI.Failure, e:
+            if (len(e.details) > 0 and
+                e.details[0] == 'DEVICE_DETACH_REJECTED'):
+                LOG.debug(_('VBD.unplug rejected: retrying...'))
+                time.sleep(1)
+            elif (len(e.details) > 0 and
+                  e.details[0] == 'DEVICE_ALREADY_DETACHED'):
+                LOG.debug(_('VBD.unplug successful eventually.'))
+                return
+            else:
+                LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'),
+                          e)
+                return
+
+
+def ignore_failure(func, *args, **kwargs):
+    try:
+        return func(*args, **kwargs)
+    except VMHelper.XenAPI.Failure, e:
+        LOG.error(_('Ignoring XenAPI.Failure %s'), e)
+        return None
+
+
+def get_this_vm_uuid():
+    with file('/sys/hypervisor/uuid') as f:
+        return f.readline().strip()
+
+
+def get_this_vm_ref(session):
+    return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
+
+
+def _stream_disk(dev, type, virtual_size, image_file):
+    offset = 0
+    if type == ImageType.DISK:
+        offset = MBR_SIZE_BYTES
+        _write_partition(virtual_size, dev)
+
+    with open('/dev/%s' % dev, 'wb') as f:
+        f.seek(offset)
+        for chunk in image_file:
+            f.write(chunk)
+
+
+def _write_partition(virtual_size, dev):
+    dest = '/dev/%s' % dev
+    mbr_last = MBR_SIZE_SECTORS - 1
+    primary_first = MBR_SIZE_SECTORS
+    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
+
+    LOG.debug(_('Writing partition table %d %d to %s...'),
+              primary_first, primary_last, dest)
+
+    def execute(cmd, process_input=None, check_exit_code=True):
+        return utils.execute(cmd=cmd,
+                             process_input=process_input,
+                             check_exit_code=check_exit_code)
+
+    execute('parted --script %s mklabel msdos' % dest)
+    execute('parted --script %s mkpart primary %ds %ds' %
+            (dest, primary_first, primary_last))
+
+    LOG.debug(_('Writing partition table %s done.'), dest)
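The partition arithmetic in _write_partition is easy to sanity-check by hand. With SECTOR_SIZE = 512 and MBR_SIZE_SECTORS = 63, a 1 GiB image works out as follows (Python 2 integer division, as in the original):

    SECTOR_SIZE = 512
    MBR_SIZE_SECTORS = 63

    virtual_size = 1 << 30                           # 1 GiB image
    sectors = virtual_size / SECTOR_SIZE             # 2097152
    primary_first = MBR_SIZE_SECTORS                 # 63
    primary_last = MBR_SIZE_SECTORS + sectors - 1    # 2097214
    # parted is then invoked as: mkpart primary 63s 2097214s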
""" import json -import logging +import M2Crypto +import os +import subprocess +import tempfile +import uuid from nova import db from nova import context +from nova import log as logging from nova import exception from nova import utils @@ -33,6 +38,9 @@ from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.vm_utils import ImageType +XenAPI = None +LOG = logging.getLogger("nova.virt.xenapi.vmops") + class VMOps(object): """ @@ -77,7 +85,8 @@ class VMOps(object): #Have a look at the VDI and see if it has a PV kernel pv_kernel = False if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, vdi_ref) + pv_kernel = VMHelper.lookup_image(self._session, instance.id, + vdi_ref) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, @@ -93,10 +102,9 @@ class VMOps(object): if network_ref: VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) - logging.debug(_('Starting VM %s...'), vm_ref) + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) - logging.info(_('Spawning VM %s created %s.'), instance.name, - vm_ref) + LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref) # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) @@ -107,12 +115,12 @@ class VMOps(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('Instance %s: booted'), instance['name']) + LOG.debug(_('Instance %s: booted'), instance['name']) timer.stop() except Exception, exc: - logging.warn(exc) - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.warn(exc) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -125,21 +133,40 @@ class VMOps(object): """Refactored out the common code of many methods that receive either a vm name or a vm instance, and want a vm instance in return. """ + vm = None try: - instance_name = instance_or_vm.name - vm = VMHelper.lookup(self._session, instance_name) - except AttributeError: - # A vm opaque ref was passed - vm = instance_or_vm + if instance_or_vm.startswith("OpaqueRef:"): + # Got passed an opaque ref; return it + return instance_or_vm + else: + # Must be the instance name + instance_name = instance_or_vm + except (AttributeError, KeyError): + # Note the the KeyError will only happen with fakes.py + # Not a string; must be an ID or a vm instance + if isinstance(instance_or_vm, (int, long)): + ctx = context.get_admin_context() + try: + instance_obj = db.instance_get_by_id(ctx, instance_or_vm) + instance_name = instance_obj.name + except exception.NotFound: + # The unit tests screw this up, as they use an integer for + # the vm name. I'd fix that up, but that's a matter for + # another bug report. 
So for now, just try with the passed + # value + instance_name = instance_or_vm + else: + instance_name = instance_or_vm.name + vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception(_('Instance not present %s') % instance_name) return vm - def snapshot(self, instance, name): + def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance :param instance: instance to be snapshotted - :param name: name/label to be given to the snapshot + :param image_id: id of image to upload to Steps involved in a XenServer snapshot: @@ -175,7 +202,7 @@ class VMOps(object): try: # call plugin to ship snapshot off to glance VMHelper.upload_image( - self._session, instance.id, template_vdi_uuids, name) + self._session, instance.id, template_vdi_uuids, image_id) finally: self._destroy(instance, template_vm_ref, shutdown=False) @@ -187,6 +214,44 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.clean_reboot', vm) self._session.wait_for_task(instance.id, task) + def set_admin_password(self, instance, new_pass): + """Set the root/admin password on the VM instance. This is done via + an agent running on the VM. Communication between nova and the agent + is done via writing xenstore records. Since communication is done over + the XenAPI RPC calls, we need to encrypt the password. We're using a + simple Diffie-Hellman class instead of the more advanced one in + M2Crypto for compatibility with the agent code. + """ + # Need to uniquely identify this request. + transaction_id = str(uuid.uuid4()) + # The simple Diffie-Hellman class is used to manage key exchange. + dh = SimpleDH() + args = {'id': transaction_id, 'pub': str(dh.get_public())} + resp = self._make_agent_call('key_init', instance, '', args) + if resp is None: + # No response from the agent + return + resp_dict = json.loads(resp) + # Successful return code from key_init is 'D0' + if resp_dict['returncode'] != 'D0': + # There was some sort of error; the message will contain + # a description of the error. 
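The set_admin_password method added just below is a two-step request/response protocol with the in-guest agent: key_init exchanges Diffie-Hellman public keys (success is signalled by returncode 'D0', with the agent's public key carried in message), then password delivers the encrypted secret (success is '0'). The shape of the exchange, condensed (`agent_call` here is a hypothetical stand-in for _make_agent_call):

    import json

    def set_password_via_agent(agent_call, dh, new_pass):
        resp = json.loads(agent_call('key_init',
                                     {'pub': str(dh.get_public())}))
        if resp['returncode'] != 'D0':
            raise RuntimeError(resp['message'])
        dh.compute_shared(int(resp['message']))
        resp = json.loads(agent_call('password',
                                     {'enc_pass': dh.encrypt(new_pass)}))
        if resp['returncode'] != '0':
            raise RuntimeError(resp['message'])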

+    def set_admin_password(self, instance, new_pass):
+        """Set the root/admin password on the VM instance. This is done via
+        an agent running on the VM. Communication between nova and the agent
+        is done via writing xenstore records. Since communication is done over
+        the XenAPI RPC calls, we need to encrypt the password. We're using a
+        simple Diffie-Hellman class instead of the more advanced one in
+        M2Crypto for compatibility with the agent code.
+        """
+        # Need to uniquely identify this request.
+        transaction_id = str(uuid.uuid4())
+        # The simple Diffie-Hellman class is used to manage key exchange.
+        dh = SimpleDH()
+        args = {'id': transaction_id, 'pub': str(dh.get_public())}
+        resp = self._make_agent_call('key_init', instance, '', args)
+        if resp is None:
+            # No response from the agent
+            return
+        resp_dict = json.loads(resp)
+        # Successful return code from key_init is 'D0'
+        if resp_dict['returncode'] != 'D0':
+            # There was some sort of error; the message will contain
+            # a description of the error.
+            raise RuntimeError(resp_dict['message'])
+        agent_pub = int(resp_dict['message'])
+        dh.compute_shared(agent_pub)
+        enc_pass = dh.encrypt(new_pass)
+        # Send the encrypted password
+        args['enc_pass'] = enc_pass
+        resp = self._make_agent_call('password', instance, '', args)
+        if resp is None:
+            # No response from the agent
+            return
+        resp_dict = json.loads(resp)
+        # Successful return code from password is '0'
+        if resp_dict['returncode'] != '0':
+            raise RuntimeError(resp_dict['message'])
+        return resp_dict['message']
+
     def destroy(self, instance):
         """Destroy VM instance"""
         vm = VMHelper.lookup(self._session, instance.name)
@@ -205,7 +270,7 @@ class VMOps(object):
             task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
             self._session.wait_for_task(instance.id, task)
         except self.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)

         # Disk clean-up
         if vdis:
@@ -214,20 +279,20 @@ class VMOps(object):
                     task = self._session.call_xenapi('Async.VDI.destroy', vdi)
                     self._session.wait_for_task(instance.id, task)
                 except self.XenAPI.Failure, exc:
-                    logging.warn(exc)
+                    LOG.exception(exc)
         # VM Destroy
         try:
             task = self._session.call_xenapi('Async.VM.destroy', vm)
             self._session.wait_for_task(instance.id, task)
         except self.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)

     def _wait_with_callback(self, instance_id, task, callback):
         ret = None
         try:
             ret = self._session.wait_for_task(instance_id, task)
         except self.XenAPI.Failure, exc:
-            logging.warn(exc)
+            LOG.exception(exc)
         callback(ret)

     def pause(self, instance, callback):
@@ -244,30 +309,19 @@ class VMOps(object):

     def suspend(self, instance, callback):
         """suspend the specified instance"""
-        instance_name = instance.name
-        vm = VMHelper.lookup(self._session, instance_name)
-        if vm is None:
-            raise Exception(_("suspend: instance not present %s") %
-                            instance_name)
+        vm = self._get_vm_opaque_ref(instance)
         task = self._session.call_xenapi('Async.VM.suspend', vm)
-        self._wait_with_callback(task, callback)
+        self._wait_with_callback(instance.id, task, callback)

     def resume(self, instance, callback):
         """resume the specified instance"""
-        instance_name = instance.name
-        vm = VMHelper.lookup(self._session, instance_name)
-        if vm is None:
-            raise Exception(_("resume: instance not present %s") %
-                            instance_name)
+        vm = self._get_vm_opaque_ref(instance)
         task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
-        self._wait_with_callback(task, callback)
+        self._wait_with_callback(instance.id, task, callback)

-    def get_info(self, instance_id):
+    def get_info(self, instance):
         """Return data about VM instance"""
-        vm = VMHelper.lookup(self._session, instance_id)
-        if vm is None:
-            raise exception.NotFound(_('Instance not'
-                                       ' found %s') % instance_id)
+        vm = self._get_vm_opaque_ref(instance)
         rec = self._session.get_xenapi().VM.get_record(vm)
         return VMHelper.compile_info(rec)

@@ -282,6 +336,11 @@ class VMOps(object):
         # TODO: implement this to fix pylint!
         return 'FAKE CONSOLE OUTPUT of instance'

+    def get_ajax_console(self, instance):
+        """Return link to instance's ajax console"""
+        # TODO: implement this!
+        return 'http://fakeajaxconsole/fake_url'
+
     def list_from_xenstore(self, vm, path):
         """Runs the xenstore-ls command to get a listing of all records
         from 'path' downward. Returns a dict with the sub-paths as keys,
@@ -326,22 +385,34 @@ class VMOps(object):
         return self._make_plugin_call('xenstore.py', method=method, vm=vm,
                                       path=path, addl_args=addl_args)

+    def _make_agent_call(self, method, vm, path, addl_args={}):
+        """Abstracts out the interaction with the agent xenapi plugin."""
+        return self._make_plugin_call('agent', method=method, vm=vm,
+                                      path=path, addl_args=addl_args)
+
     def _make_plugin_call(self, plugin, method, vm, path, addl_args={}):
         """Abstracts out the process of calling a method of a xenapi plugin.
         Any errors raised by the plugin will in turn raise a RuntimeError here.
         """
+        instance_id = vm.id
         vm = self._get_vm_opaque_ref(vm)
         rec = self._session.get_xenapi().VM.get_record(vm)
         args = {'dom_id': rec['domid'], 'path': path}
         args.update(addl_args)
-        # If the 'testing_mode' attribute is set, add that to the args.
-        if getattr(self, 'testing_mode', False):
-            args['testing_mode'] = 'true'
         try:
             task = self._session.async_call_plugin(plugin, method, args)
-            ret = self._session.wait_for_task(0, task)
+            ret = self._session.wait_for_task(instance_id, task)
         except self.XenAPI.Failure, e:
-            raise RuntimeError("%s" % e.details[-1])
+            ret = None
+            err_trace = e.details[-1]
+            err_msg = err_trace.splitlines()[-1]
+            strargs = str(args)
+            if 'TIMEOUT:' in err_msg:
+                LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
+                            'VM id=%(instance_id)s; args=%(strargs)s') %
+                          locals())
+            else:
+                LOG.error(_('The call to %(method)s returned an error: %(e)s. '
+                            'VM id=%(instance_id)s; args=%(strargs)s') %
+                          locals())
         return ret

     def add_to_xenstore(self, vm, path, key, value):
@@ -453,3 +524,89 @@ class VMOps(object):
         """Removes all data from the xenstore parameter record for this VM."""
         self.write_to_param_xenstore(instance_or_vm, {})
     ########################################################################
+
+
+def _runproc(cmd):
+    pipe = subprocess.PIPE
+    return subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe,
+                            stderr=pipe, close_fds=True)
+
+
+class SimpleDH(object):
+    """This class wraps all the functionality needed to implement
+    basic Diffie-Hellman-Merkle key exchange in Python. It features
+    intelligent defaults for the prime and base numbers needed for the
+    calculation, while allowing you to supply your own. It requires that
+    the openssl binary be installed on the system on which this is run,
+    as it uses that to handle the encryption and decryption. If openssl
+    is not available, a RuntimeError will be raised.
+    """
+    def __init__(self, prime=None, base=None, secret=None):
+        """You can specify the values for prime and base if you wish;
+        otherwise, reasonable default values will be used.
+        """
+        if prime is None:
+            self._prime = 162259276829213363391578010288127
+        else:
+            self._prime = prime
+        if base is None:
+            self._base = 5
+        else:
+            self._base = base
+        self._shared = self._public = None
+
+        self._dh = M2Crypto.DH.set_params(
+            self.dec_to_mpi(self._prime),
+            self.dec_to_mpi(self._base))
+        self._dh.gen_key()
+        self._public = self.mpi_to_dec(self._dh.pub)
+
+    def get_public(self):
+        return self._public
+
+    def compute_shared(self, other):
+        self._shared = self.bin_to_dec(
+            self._dh.compute_key(self.dec_to_mpi(other)))
+        return self._shared
+
+    def mpi_to_dec(self, mpi):
+        bn = M2Crypto.m2.mpi_to_bn(mpi)
+        hexval = M2Crypto.m2.bn_to_hex(bn)
+        dec = int(hexval, 16)
+        return dec
+
+    def bin_to_dec(self, binval):
+        bn = M2Crypto.m2.bin_to_bn(binval)
+        hexval = M2Crypto.m2.bn_to_hex(bn)
+        dec = int(hexval, 16)
+        return dec
+
+    def dec_to_mpi(self, dec):
+        bn = M2Crypto.m2.dec_to_bn('%s' % dec)
+        mpi = M2Crypto.m2.bn_to_mpi(bn)
+        return mpi
+
+    def _run_ssl(self, text, which):
+        base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc '
+                    '-a -pass pass:%(shared)s -nosalt %(dec_flag)s')
+        if which.lower()[0] == 'd':
+            dec_flag = ' -d'
+        else:
+            dec_flag = ''
+        fd, tmpfile = tempfile.mkstemp()
+        os.close(fd)
+        file(tmpfile, 'w').write(text)
+        shared = self._shared
+        cmd = base_cmd % locals()
+        proc = _runproc(cmd)
+        proc.wait()
+        err = proc.stderr.read()
+        if err:
+            raise RuntimeError(_('OpenSSL error: %s') % err)
+        return proc.stdout.read()
+
+    def encrypt(self, text):
+        return self._run_ssl(text, 'enc')
+
+    def decrypt(self, text):
+        return self._run_ssl(text, 'dec')
"), sr_ref) pbds = [] try: pbds = session.get_xenapi().SR.get_PBDs(sr_ref) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when getting PBDs for %s', - exc, sr_ref) + LOG.warn(_('Ignoring exception %s when getting PBDs for %s'), + exc, sr_ref) for pbd in pbds: try: session.get_xenapi().PBD.unplug(pbd) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when unplugging PBD %s', - exc, pbd) + LOG.warn(_('Ignoring exception %s when unplugging PBD %s'), + exc, pbd) try: session.get_xenapi().SR.forget(sr_ref) - logging.debug("Forgetting SR %s done.", sr_ref) + LOG.debug(_("Forgetting SR %s done."), sr_ref) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when forgetting SR %s', - exc, sr_ref) + LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc, + sr_ref) @classmethod def introduce_vdi(cls, session, sr_ref): @@ -118,12 +119,12 @@ class VolumeHelper(HelperBase): try: vdis = session.get_xenapi().SR.get_VDIs(sr_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref) try: vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to get record' ' of VDI %s on') % vdis[0]) else: @@ -141,7 +142,7 @@ class VolumeHelper(HelperBase): vdi_rec['xenstore_data'], vdi_rec['sm_config']) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to introduce VDI for SR %s') % sr_ref) @@ -165,11 +166,8 @@ class VolumeHelper(HelperBase): target_host = _get_target_host(iscsi_portal) target_port = _get_target_port(iscsi_portal) target_iqn = _get_iqn(iscsi_name, volume_id) - logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)', - volume_id, - target_host, - target_port, - target_iqn) + LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)', + volume_id, target_host, target_port, target_iqn) if (device_number < 0) or \ (volume_id is None) or \ (target_host is None) or \ @@ -196,7 +194,7 @@ class VolumeHelper(HelperBase): elif re.match('^[0-9]+$', mountpoint): return string.atoi(mountpoint, 10) else: - logging.warn('Mountpoint cannot be translated: %s', mountpoint) + LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint) return -1 @@ -257,7 +255,7 @@ def _get_target(volume_id): "sendtargets -p %s" % volume_ref['host']) except exception.ProcessExecutionError, exc: - logging.warn(exc) + LOG.exception(exc) else: targets = r.splitlines() if len(_e) == 0 and len(targets) == 1: diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index fdeb2506c..189f968c6 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,14 +17,17 @@ """ Management class for Storage-related functions (attach, detach, etc). 
""" -import logging from nova import exception +from nova import log as logging from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError +LOG = logging.getLogger("nova.virt.xenapi.volumeops") + + class VolumeOps(object): """ Management class for Volume-related tasks @@ -45,8 +48,8 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # NOTE: No Resource Pool concept so far - logging.debug(_("Attach_volume: %s, %s, %s"), - instance_name, device_path, mountpoint) + LOG.debug(_("Attach_volume: %s, %s, %s"), + instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) @@ -61,7 +64,7 @@ class VolumeOps(object): try: vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to create VDI on SR %s for instance %s') % (sr_ref, @@ -73,7 +76,7 @@ class VolumeOps(object): vol_rec['deviceNumber'], False) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to use SR %s for instance %s') % (sr_ref, @@ -84,13 +87,13 @@ class VolumeOps(object): vbd_ref) self._session.wait_for_task(vol_rec['deviceNumber'], task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to attach volume to instance %s') % instance_name) - logging.info(_('Mountpoint %s attached to instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s attached to instance %s'), + mountpoint, instance_name) def detach_volume(self, instance_name, mountpoint): """Detach volume storage to VM instance""" @@ -100,13 +103,13 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # Detach VBD from VM - logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) + LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) device_number = VolumeHelper.mountpoint_to_number(mountpoint) try: vbd_ref = VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to locate volume %s') % mountpoint) else: try: @@ -114,13 +117,13 @@ class VolumeOps(object): vbd_ref) VMHelper.unplug_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to detach volume %s') % mountpoint) try: VMHelper.destroy_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) # Forget SR VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - logging.info(_('Mountpoint %s detached from instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s detached from instance %s'), + mountpoint, instance_name) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c48f5b7cb..c57c883c9 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -51,8 +51,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :iqn_prefix: IQN Prefix, e.g. 
'iqn.2010-10.org.openstack' """ -import logging import sys +import urlparse import xmlrpclib from eventlet import event @@ -62,9 +62,14 @@ from nova import context from nova import db from nova import utils from nova import flags +from nova import log as logging from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps + +LOG = logging.getLogger("nova.virt.xenapi") + + FLAGS = flags.FLAGS flags.DEFINE_string('xenapi_connection_url', @@ -84,10 +89,17 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') +flags.DEFINE_string('xenapi_image_service', + 'glance', + 'Where to get VM images: glance or objectstore.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, 'The interval used for polling of coalescing vhds.' ' Used only if connection_type=xenapi.') +flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts', + 5, + 'Max number of times to poll for VHD to coalesce.' + ' Used only if connection_type=xenapi.') flags.DEFINE_string('target_host', None, 'iSCSI Target Host') @@ -136,14 +148,18 @@ class XenAPIConnection(object): """Create VM instance""" self._vmops.spawn(instance) - def snapshot(self, instance, name): + def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ - self._vmops.snapshot(instance, name) + self._vmops.snapshot(instance, image_id) def reboot(self, instance): """Reboot VM instance""" self._vmops.reboot(instance) + def set_admin_password(self, instance, new_pass): + """Set the root/admin password on the VM instance""" + self._vmops.set_admin_password(instance, new_pass) + def destroy(self, instance): """Destroy VM instance""" self._vmops.destroy(instance) @@ -176,6 +192,10 @@ class XenAPIConnection(object): """Return snapshot of console""" return self._vmops.get_console_output(instance) + def get_ajax_console(self, instance): + """Return link to instance's ajax console""" + return self._vmops.get_ajax_console(instance) + def attach_volume(self, instance_name, device_path, mountpoint): """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, @@ -186,6 +206,12 @@ class XenAPIConnection(object): """Detach volume storage to VM instance""" return self._volumeops.detach_volume(instance_name, mountpoint) + def get_console_pool_info(self, console_type): + xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) + return {'address': xs_url.netloc, + 'username': FLAGS.xenapi_connection_username, + 'password': FLAGS.xenapi_connection_password} + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" @@ -194,6 +220,7 @@ class XenAPISession(object): self.XenAPI = self.get_imported_xenapi() self._session = self._create_session(url) self._session.login_with_password(user, pw) + self.loop = None def get_imported_xenapi(self): """Stubout point. This can be replaced with a mock xenapi module.""" @@ -230,21 +257,28 @@ class XenAPISession(object): def wait_for_task(self, id, task): """Return the result of the given task. The task is polled - until it completes.""" + until it completes. 
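The new get_console_pool_info() above derives the console pool's address from the existing xenapi_connection_url flag rather than introducing a new one. The relevant urlparse behavior, shown with a made-up URL:

    import urlparse  # on Python 3: from urllib import parse as urlparse

    xs_url = urlparse.urlparse('https://xenserver1.example.com:443/')
    print(xs_url.netloc)  # xenserver1.example.com:443 -- host[:port] for the pool
    print(xs_url.scheme)  # https
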
Not re-entrant.""" done = event.Event() - loop = utils.LoopingCall(self._poll_task, id, task, done) - loop.start(FLAGS.xenapi_task_poll_interval, now=True) + self.loop = utils.LoopingCall(self._poll_task, id, task, done) + self.loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() - loop.stop() + self.loop.stop() return rv + def _stop_loop(self): + """Stop polling for task to finish.""" + #NOTE(sandy-walsh) Had to break this call out to support unit tests. + if self.loop: + self.loop.stop() + def _create_session(self, url): """Stubout point. This can be replaced with a mock session.""" return self.XenAPI.Session(url) def _poll_task(self, id, task, done): """Poll the given XenAPI task, and fire the given action if we - get a result.""" + get a result. + """ try: name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) @@ -256,7 +290,7 @@ class XenAPISession(object): return elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info(_("Task [%s] %s status: success %s") % ( + LOG.info(_("Task [%s] %s status: success %s") % ( name, task, result)) @@ -264,7 +298,7 @@ class XenAPISession(object): else: error_info = self._session.xenapi.task.get_error_info(task) action["error"] = str(error_info) - logging.warn(_("Task [%s] %s status: %s %s") % ( + LOG.warn(_("Task [%s] %s status: %s %s") % ( name, task, status, @@ -272,15 +306,16 @@ class XenAPISession(object): done.send_exception(self.XenAPI.Failure(error_info)) db.instance_action_create(context.get_admin_context(), action) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.warn(exc) done.send_exception(*sys.exc_info()) + self._stop_loop() def _unwrap_plugin_exceptions(self, func, *args, **kwargs): """Parse exception details""" try: return func(*args, **kwargs) except self.XenAPI.Failure, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -293,7 +328,7 @@ class XenAPISession(object): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) raise diff --git a/nova/volume/api.py b/nova/volume/api.py index 2d7fe3762..ce4831cc3 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -21,11 +21,11 @@ Handles all requests relating to volumes. """ import datetime -import logging from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import rpc from nova.db import base @@ -33,16 +33,18 @@ from nova.db import base FLAGS = flags.FLAGS flags.DECLARE('storage_availability_zone', 'nova.volume.manager') +LOG = logging.getLogger('nova.volume') + class API(base.Base): """API for interacting with the volume manager.""" def create(self, context, size, name, description): if quota.allowed_volumes(context, 1, size) < 1: - logging.warn("Quota exceeeded for %s, tried to create %sG volume", + LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"), context.project_id, size) - raise quota.QuotaError("Volume quota exceeded. You cannot " - "create a volume of size %s" % size) + raise quota.QuotaError(_("Volume quota exceeded. 
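wait_for_task() above parks the calling greenthread on an eventlet Event while a LoopingCall polls the XenAPI task; _poll_task fires the Event once the task leaves the pending state, and the new self.loop/_stop_loop indirection exists so unit tests can stop the poller. A stripped-down version of that rendezvous, using eventlet directly (nova's LoopingCall is essentially a wrapper around a polling greenthread; requires eventlet installed):

    import eventlet
    from eventlet import event

    done = event.Event()
    state = {'polls': 0}

    def poll():
        # Plays the role of _poll_task driven by the LoopingCall.
        while not done.ready():
            state['polls'] += 1
            if state['polls'] >= 3:   # pretend the task completed
                done.send('task result')
            eventlet.sleep(0.1)       # FLAGS.xenapi_task_poll_interval

    eventlet.spawn(poll)
    print(done.wait())                # blocks this greenthread only
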
You cannot " + "create a volume of size %s") % size) options = { 'size': size, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 8353b9712..5fefa10cf 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -20,15 +20,15 @@ Drivers for volumes. """ -import logging -import os import time from nova import exception from nova import flags +from nova import log as logging from nova import utils +LOG = logging.getLogger("nova.volume.driver") FLAGS = flags.FLAGS flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') @@ -47,8 +47,10 @@ flags.DEFINE_integer('iscsi_num_targets', 'Number of iscsi target ids per host') flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:', 'prefix for iscsi volumes') -flags.DEFINE_string('iscsi_ip_prefix', '127.0', +flags.DEFINE_string('iscsi_ip_prefix', '$my_ip', 'discover volumes on the ip that starts with this prefix') +flags.DEFINE_string('rbd_pool', 'rbd', + 'the rbd pool in which volumes are stored') class VolumeDriver(object): @@ -73,13 +75,15 @@ class VolumeDriver(object): tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise - logging.exception(_("Recovering from a failed execute." - "Try number %s"), tries) + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) time.sleep(tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" - if not os.path.isdir("/dev/%s" % FLAGS.volume_group): + out, err = self._execute("sudo vgs --noheadings -o name") + volume_groups = out.split() + if not FLAGS.volume_group in volume_groups: raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) @@ -205,7 +209,7 @@ class FakeAOEDriver(AOEDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE AOE: %s"), cmd) + LOG.debug(_("FAKE AOE: %s"), cmd) return (None, None) @@ -281,7 +285,8 @@ class ISCSIDriver(VolumeDriver): self._execute("sudo iscsiadm -m node -T %s -p %s --op update " "-n node.startup -v automatic" % (iscsi_name, iscsi_portal)) - return "/dev/iscsi/%s" % volume['name'] + return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal, + iscsi_name) def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" @@ -310,5 +315,109 @@ class FakeISCSIDriver(ISCSIDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE ISCSI: %s"), cmd) + LOG.debug(_("FAKE ISCSI: %s"), cmd) return (None, None) + + +class RBDDriver(VolumeDriver): + """Implements RADOS block device (RBD) volume commands""" + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + (stdout, stderr) = self._execute("rados lspools") + pools = stdout.split("\n") + if not FLAGS.rbd_pool in pools: + raise exception.Error(_("rbd has no pool %s") % + FLAGS.rbd_pool) + + def create_volume(self, volume): + """Creates a logical volume.""" + if int(volume['size']) == 0: + size = 100 + else: + size = int(volume['size']) * 1024 + self._try_execute("rbd --pool %s --size %d create %s" % + (FLAGS.rbd_pool, + size, + volume['name'])) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + self._try_execute("rbd --pool %s rm %s" % + (FLAGS.rbd_pool, + volume['name'])) + + def local_path(self, volume): + """Returns the path of the rbd volume.""" + # This is the same as the remote path + # since qemu accesses it directly. 
+ return self.discover_volume(volume) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume""" + pass + + def discover_volume(self, volume): + """Discover volume on a remote host""" + return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name']) + + def undiscover_volume(self, volume): + """Undiscover volume on a remote host""" + pass + + +class SheepdogDriver(VolumeDriver): + """Executes commands relating to Sheepdog Volumes""" + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + try: + (out, err) = self._execute("collie cluster info") + if not out.startswith('running'): + raise exception.Error(_("Sheepdog is not working: %s") % out) + except exception.ProcessExecutionError: + raise exception.Error(_("Sheepdog is not working")) + + def create_volume(self, volume): + """Creates a sheepdog volume""" + if int(volume['size']) == 0: + sizestr = '100M' + else: + sizestr = '%sG' % volume['size'] + self._try_execute("qemu-img create sheepdog:%s %s" % + (volume['name'], sizestr)) + + def delete_volume(self, volume): + """Deletes a logical volume""" + self._try_execute("collie vdi delete %s" % volume['name']) + + def local_path(self, volume): + return "sheepdog:%s" % volume['name'] + + def ensure_export(self, context, volume): + """Safely and synchronously recreates an export for a logical volume""" + pass + + def create_export(self, context, volume): + """Exports the volume""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume""" + pass + + def discover_volume(self, volume): + """Discover volume on a remote host""" + return "sheepdog:%s" % volume['name'] + + def undiscover_volume(self, volume): + """Undiscover volume on a remote host""" + pass diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 966334c50..6348539c5 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,17 +42,18 @@ intact. """ -import logging import datetime from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils +LOG = logging.getLogger('nova.volume.manager') FLAGS = flags.FLAGS flags.DEFINE_string('storage_availability_zone', 'nova', @@ -81,7 +82,7 @@ class VolumeManager(manager.Manager): self.driver.check_for_setup_error() ctxt = context.get_admin_context() volumes = self.db.volume_get_all_by_host(ctxt, self.host) - logging.debug(_("Re-exporting %s volumes"), len(volumes)) + LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: self.driver.ensure_export(ctxt, volume) @@ -89,7 +90,7 @@ class VolumeManager(manager.Manager): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - logging.info(_("volume %s: creating"), volume_ref['name']) + LOG.info(_("volume %s: creating"), volume_ref['name']) self.db.volume_update(context, volume_id, @@ -98,18 +99,18 @@ class VolumeManager(manager.Manager): # before passing it to the driver. 
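Unlike the LVM-backed drivers, the new RBDDriver and SheepdogDriver above never expose a local block device: local_path() and discover_volume() both return a qemu-style URI (rbd:pool/name, sheepdog:name) because qemu talks to the storage cluster directly, which is also why their export methods are no-ops. Note the differing size units, sketched below: `rbd create` takes megabytes while `qemu-img create` takes a size string.

    def rbd_path(pool, name):
        return 'rbd:%s/%s' % (pool, name)

    def rbd_size_mb(size_gb):
        # 'rbd create' wants MB; 0G volumes get a 100MB floor, as above.
        return 100 if int(size_gb) == 0 else int(size_gb) * 1024

    def sheepdog_size(size_gb):
        # 'qemu-img create' takes a size string instead.
        return '100M' if int(size_gb) == 0 else '%sG' % size_gb

    print(rbd_path('rbd', 'volume-00000001'))  # rbd:rbd/volume-00000001
    print(rbd_size_mb(2))                      # 2048
    print(sheepdog_size(2))                    # 2G
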
volume_ref['host'] = self.host - logging.debug(_("volume %s: creating lv of size %sG"), - volume_ref['name'], volume_ref['size']) + LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'], + volume_ref['size']) self.driver.create_volume(volume_ref) - logging.debug(_("volume %s: creating export"), volume_ref['name']) + LOG.debug(_("volume %s: creating export"), volume_ref['name']) self.driver.create_export(context, volume_ref) now = datetime.datetime.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) - logging.debug(_("volume %s: created successfully"), volume_ref['name']) + LOG.debug(_("volume %s: created successfully"), volume_ref['name']) return volume_id def delete_volume(self, context, volume_id): @@ -120,12 +121,12 @@ class VolumeManager(manager.Manager): raise exception.Error(_("Volume is still attached")) if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) - logging.debug(_("volume %s: removing export"), volume_ref['name']) + LOG.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) - logging.debug(_("volume %s: deleting"), volume_ref['name']) + LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) self.db.volume_destroy(context, volume_id) - logging.debug(_("volume %s: deleted successfully"), volume_ref['name']) + LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True def setup_compute_volume(self, context, volume_id): diff --git a/nova/wsgi.py b/nova/wsgi.py index b5d6b96c1..4f5307d80 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -21,8 +21,7 @@ Utility methods for working with WSGI servers """ -import json -import logging +import os import sys from xml.dom import minidom @@ -35,18 +34,37 @@ import webob import webob.dec import webob.exc +from paste import deploy -logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) +from nova import flags +from nova import log as logging +from nova import utils + + +FLAGS = flags.FLAGS + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.DEBUG): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, threads=1000): + logging.basicConfig() self.pool = eventlet.GreenPool(threads) def start(self, application, port, host='0.0.0.0', backlog=128): """Run a WSGI server with the given application.""" + logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) @@ -59,14 +77,39 @@ class Server(object): def _run(self, application, socket): """Start a WSGI server in a new green thread.""" - eventlet.wsgi.server(socket, application, custom_pool=self.pool) + logger = logging.getLogger('eventlet.wsgi.server') + eventlet.wsgi.server(socket, application, custom_pool=self.pool, + log=WritableLogger(logger)) class Application(object): -# TODO(gundlach): I think we should toss this class, now that it has no -# purpose. """Base WSGI application wrapper. Subclasses need to implement __call__.""" + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config fles. 
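eventlet.wsgi.server() writes its access log to a file-like object; the WritableLogger added to wsgi.py above adapts a stdlib logger to that interface so access-log lines flow through nova's logging instead of straight to stderr. The adapter standalone, with a usage line:

    import logging

    class WritableLogger(object):
        """A thin wrapper that responds to `write` and logs."""

        def __init__(self, logger, level=logging.DEBUG):
            self.logger = logger
            self.level = level

        def write(self, msg):
            self.logger.log(self.level, msg)

    logging.basicConfig(level=logging.DEBUG)
    log = WritableLogger(logging.getLogger('eventlet.wsgi.server'))
    log.write('GET /v1.0/ HTTP/1.1 200')  # lands in the logger, not on stderr
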
+ + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = nova.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import nova.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + return cls(**local_config) + def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @@ -104,20 +147,65 @@ class Application(object): class Middleware(Application): - """ - Base WSGI middleware wrapper. These classes require an application to be + """Base WSGI middleware. + + These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ - def __init__(self, application): # pylint: disable-msg=W0231 + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config fles. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = nova.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import nova.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): self.application = application + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - """Override to implement middleware behavior.""" - return self.application + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) class Debug(Middleware): @@ -303,7 +391,7 @@ class Serializer(object): try: is_xml = (datastring[0] == '<') if not is_xml: - return json.loads(datastring) + return utils.loads(datastring) return self._from_xml(datastring) except: return None @@ -336,7 +424,7 @@ class Serializer(object): return result def _to_json(self, data): - return json.dumps(data) + return utils.dumps(data) def _to_xml(self, data): metadata = self.metadata.get('application/xml', {}) @@ -372,3 +460,64 @@ class Serializer(object): node = doc.createTextNode(str(data)) result.appendChild(node) return result + + +def paste_config_file(basename): + """Find the best location in the system for a paste config file. 
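The reworked Middleware above replaces "override __call__" with two finer-grained hooks: process_request() may short-circuit the stack by returning a response (returning None passes the request on), and process_response() post-processes whatever came back. A runnable sketch of a filter in that style; the Middleware base is condensed from the diff, BlockAdmin is hypothetical, and webob must be installed:

    import webob
    import webob.dec
    import webob.exc

    class Middleware(object):
        def __init__(self, application):
            self.application = application

        def process_request(self, req):
            return None               # None means: continue down the stack

        def process_response(self, response):
            return response

        @webob.dec.wsgify
        def __call__(self, req):
            response = self.process_request(req)
            if response:
                return response
            response = req.get_response(self.application)
            return self.process_response(response)

    class BlockAdmin(Middleware):
        # Hypothetical filter: refuse some paths, tag everything else.
        def process_request(self, req):
            if req.path.startswith('/admin'):
                return webob.exc.HTTPForbidden()
            return None

        def process_response(self, response):
            response.headers['X-Filtered'] = 'yes'
            return response

    @webob.dec.wsgify
    def app(req):
        return 'hello'

    wrapped = BlockAdmin(app)
    print(webob.Request.blank('/admin').get_response(wrapped).status)          # 403 Forbidden
    print(webob.Request.blank('/').get_response(wrapped).headers['X-Filtered'])  # yes
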
+ + Search Order + ------------ + + The search for a paste config file honors `FLAGS.state_path`, which in a + version checked out from bzr will be the `nova` directory in the top level + of the checkout, and in an installation for a package for your distribution + will likely point to someplace like /etc/nova. + + This method tries to load places likely to be used in development or + experimentation before falling back to the system-wide configuration + in `/etc/nova/`. + + * Current working directory + * the `etc` directory under state_path, because when working on a checkout + from bzr this will point to the default + * top level of FLAGS.state_path, for distributions + * /etc/nova, which may not be diffrerent from state_path on your distro + + """ + + configfiles = [basename, + os.path.join(FLAGS.state_path, 'etc', basename), + os.path.join(FLAGS.state_path, basename), + '/etc/nova/%s' % basename] + for configfile in configfiles: + if os.path.exists(configfile): + return configfile + + +def load_paste_configuration(filename, appname): + """Returns a paste configuration dict, or None.""" + filename = os.path.abspath(filename) + config = None + try: + config = deploy.appconfig("config:%s" % filename, name=appname) + except LookupError: + pass + return config + + +def load_paste_app(filename, appname): + """Builds a wsgi app from a paste config, None if app not configured.""" + filename = os.path.abspath(filename) + app = None + try: + app = deploy.loadapp("config:%s" % filename, name=appname) + except LookupError: + pass + return app + + +def paste_config_to_flags(config, mixins): + for k, v in mixins.iteritems(): + value = config.get(k, v) + converted_value = FLAGS[k].parser.Parse(value) + setattr(FLAGS, k, converted_value) |
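paste_config_file() above is a plain first-hit-wins search over the precedence list in its docstring. A trimmed equivalent, with state_path hard-coded to a plausible default instead of read from FLAGS:

    import os

    def paste_config_file(basename, state_path='/var/lib/nova'):
        # Same precedence as the diff: cwd, the checkout's etc/ dir,
        # state_path itself, then the system-wide /etc/nova.
        candidates = [basename,
                      os.path.join(state_path, 'etc', basename),
                      os.path.join(state_path, basename),
                      '/etc/nova/%s' % basename]
        for candidate in candidates:
            if os.path.exists(candidate):
                return candidate
        return None  # caller decides how to handle "not configured"

    print(paste_config_file('nova-api.conf'))
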
