diff options
| author | Brian Waldon <brian.waldon@rackspace.com> | 2011-03-25 10:55:07 -0400 |
|---|---|---|
| committer | Brian Waldon <brian.waldon@rackspace.com> | 2011-03-25 10:55:07 -0400 |
| commit | 6c29d4a09574fd230a5fe3b0bbfa615fe18b328c (patch) | |
| tree | 5bfb174908c77c7bf1a3effadf54779d085953b8 /nova | |
| parent | 3b8f1f54136a67ba4c306e47b25b686328ec23b5 (diff) | |
| parent | 162af7b79631b151f03bb46773a1448e6c051325 (diff) | |
merging trunk
Diffstat (limited to 'nova')
92 files changed, 8011 insertions, 2127 deletions
diff --git a/nova/api/direct.py b/nova/api/direct.py index dfca250e0..e5f33cee4 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -38,6 +38,7 @@ import routes import webob from nova import context +from nova import exception from nova import flags from nova import utils from nova import wsgi @@ -205,10 +206,53 @@ class ServiceWrapper(wsgi.Controller): # NOTE(vish): make sure we have no unicode keys for py2.6. params = dict([(str(k), v) for (k, v) in params.iteritems()]) result = method(context, **params) - if type(result) is dict or type(result) is list: - return self._serialize(result, req.best_match_content_type()) - else: + if result is None or type(result) is str or type(result) is unicode: return result + try: + return self._serialize(result, req.best_match_content_type()) + except: + raise exception.Error("returned non-serializable type: %s" + % result) + + +class Limited(object): + __notdoc = """Limit the available methods on a given object. + + (Not a docstring so that the docstring can be conditionally overriden.) + + Useful when defining a public API that only exposes a subset of an + internal API. + + Expected usage of this class is to define a subclass that lists the allowed + methods in the 'allowed' variable. + + Additionally where appropriate methods can be added or overwritten, for + example to provide backwards compatibility. + + The wrapping approach has been chosen so that the wrapped API can maintain + its own internal consistency, for example if it calls "self.create" it + should get its own create method rather than anything we do here. 
+ + """ + + _allowed = None + + def __init__(self, proxy): + self._proxy = proxy + if not self.__doc__: + self.__doc__ = proxy.__doc__ + if not self._allowed: + self._allowed = [] + + def __getattr__(self, key): + """Only return methods that are named in self._allowed.""" + if key not in self._allowed: + raise AttributeError() + return getattr(self._proxy, key) + + def __dir__(self): + """Only return methods that are named in self._allowed.""" + return [x for x in dir(self._proxy) if x in self._allowed] class Proxy(object): diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 20701cfa8..a3c3b25a1 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -61,10 +61,13 @@ class RequestLogging(wsgi.Middleware): return rv def log_request_completion(self, response, request, start): - controller = request.environ.get('ec2.controller', None) - if controller: - controller = controller.__class__.__name__ - action = request.environ.get('ec2.action', None) + apireq = request.environ.get('ec2.request', None) + if apireq: + controller = apireq.controller + action = apireq.action + else: + controller = None + action = None ctxt = request.environ.get('ec2.context', None) delta = utils.utcnow() - start seconds = delta.seconds @@ -75,7 +78,7 @@ class RequestLogging(wsgi.Middleware): microseconds, request.remote_addr, request.method, - request.path_info, + "%s%s" % (request.script_name, request.path_info), controller, action, response.status_int, diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index d8d90ad83..6a5609d4a 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -304,7 +304,7 @@ class AdminController(object): * Volume (up, down, None) * Volume Count """ - services = db.service_get_all(context) + services = db.service_get_all(context, False) now = datetime.datetime.utcnow() hosts = [] rv = [] diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index e257e44e7..0da642318 100644 --- a/nova/api/ec2/cloud.py +++ 
b/nova/api/ec2/cloud.py @@ -196,7 +196,7 @@ class CloudController(object): def _describe_availability_zones(self, context, **kwargs): ctxt = context.elevated() - enabled_services = db.service_get_all(ctxt) + enabled_services = db.service_get_all(ctxt, False) disabled_services = db.service_get_all(ctxt, True) available_zones = [] for zone in [service.availability_zone for service @@ -221,7 +221,7 @@ class CloudController(object): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} - services = db.service_get_all(context) + services = db.service_get_all(context, False) now = datetime.datetime.utcnow() hosts = [] for host in [service['host'] for service in services]: @@ -541,7 +541,7 @@ class CloudController(object): volumes = [] for ec2_id in volume_id: internal_id = ec2utils.ec2_id_to_id(ec2_id) - volume = self.volume_api.get(context, internal_id) + volume = self.volume_api.get(context, volume_id=internal_id) volumes.append(volume) else: volumes = self.volume_api.get_all(context) @@ -585,9 +585,11 @@ class CloudController(object): def create_volume(self, context, size, **kwargs): LOG.audit(_("Create volume of %s GB"), size, context=context) - volume = self.volume_api.create(context, size, - kwargs.get('display_name'), - kwargs.get('display_description')) + volume = self.volume_api.create( + context, + size=size, + name=kwargs.get('display_name'), + description=kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
@@ -606,7 +608,9 @@ class CloudController(object): if field in kwargs: changes[field] = kwargs[field] if changes: - self.volume_api.update(context, volume_id, kwargs) + self.volume_api.update(context, + volume_id=volume_id, + fields=changes) return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): @@ -619,7 +623,7 @@ class CloudController(object): instance_id=instance_id, volume_id=volume_id, device=device) - volume = self.volume_api.get(context, volume_id) + volume = self.volume_api.get(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': ec2utils.id_to_ec2_id(instance_id), @@ -630,7 +634,7 @@ class CloudController(object): def detach_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) LOG.audit(_("Detach volume %s"), volume_id, context=context) - volume = self.volume_api.get(context, volume_id) + volume = self.volume_api.get(context, volume_id=volume_id) instance = self.compute_api.detach_volume(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], @@ -768,7 +772,7 @@ class CloudController(object): def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) - self.network_api.release_floating_ip(context, public_ip) + self.network_api.release_floating_ip(context, address=public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): @@ -782,7 +786,7 @@ class CloudController(object): def disassociate_address(self, context, public_ip, **kwargs): LOG.audit(_("Disassociate address %s"), public_ip, context=context) - self.network_api.disassociate_floating_ip(context, public_ip) + self.network_api.disassociate_floating_ip(context, address=public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): diff 
--git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 143b1d2b2..727655a86 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -35,6 +35,7 @@ from nova.api.openstack import flavors from nova.api.openstack import images from nova.api.openstack import limits from nova.api.openstack import servers +from nova.api.openstack import server_metadata from nova.api.openstack import shared_ip_groups from nova.api.openstack import users from nova.api.openstack import zones @@ -71,7 +72,7 @@ class APIRouter(wsgi.Router): """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one""" return cls() - def __init__(self): + def __init__(self, ext_mgr=None): self.server_members = {} mapper = routes.Mapper() self._setup_routes(mapper) @@ -117,9 +118,6 @@ class APIRouter(wsgi.Router): mapper.resource("image", "images", controller=images.Controller(), collection={'detail': 'GET'}) - mapper.resource("flavor", "flavors", controller=flavors.Controller(), - collection={'detail': 'GET'}) - mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, controller=shared_ip_groups.Controller()) @@ -138,6 +136,10 @@ class APIRouterV10(APIRouter): collection={'detail': 'GET'}, member=self.server_members) + mapper.resource("flavor", "flavors", + controller=flavors.ControllerV10(), + collection={'detail': 'GET'}) + class APIRouterV11(APIRouter): """Define routes specific to OpenStack API V1.1.""" @@ -148,6 +150,14 @@ class APIRouterV11(APIRouter): controller=servers.ControllerV11(), collection={'detail': 'GET'}, member=self.server_members) + mapper.resource("server_meta", "meta", + controller=server_metadata.Controller(), + parent_resource=dict(member_name='server', + collection_name='servers')) + + mapper.resource("flavor", "flavors", + controller=flavors.ControllerV11(), + collection={'detail': 'GET'}) class Versions(wsgi.Application): diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py 
index 2510ffb61..86066fa20 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -14,6 +14,7 @@ # under the License. import common +import webob.exc from nova import exception from nova import flags @@ -51,10 +52,10 @@ class Controller(wsgi.Controller): raise exception.NotAuthorized(_("Not admin user.")) def index(self, req): - raise faults.Fault(exc.HTTPNotImplemented()) + raise faults.Fault(webob.exc.HTTPNotImplemented()) def detail(self, req): - raise faults.Fault(exc.HTTPNotImplemented()) + raise faults.Fault(webob.exc.HTTPNotImplemented()) def show(self, req, id): """Return data about the given account id""" @@ -69,7 +70,7 @@ class Controller(wsgi.Controller): def create(self, req): """We use update with create-or-update semantics because the id comes from an external source""" - raise faults.Fault(exc.HTTPNotImplemented()) + raise faults.Fault(webob.exc.HTTPNotImplemented()) def update(self, req, id): """This is really create or update.""" diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index bff050347..8cad1273a 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -20,9 +20,12 @@ from urlparse import urlparse import webob from nova import exception +from nova import flags +FLAGS = flags.FLAGS -def limited(items, request, max_limit=1000): + +def limited(items, request, max_limit=FLAGS.osapi_max_limit): """ Return a slice of items according to requested offset and limit. 
@@ -56,6 +59,36 @@ def limited(items, request, max_limit=1000): return items[offset:range_end] +def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): + """Return a slice of items according to the requested marker and limit.""" + + try: + marker = int(request.GET.get('marker', 0)) + except ValueError: + raise webob.exc.HTTPBadRequest(_('marker param must be an integer')) + + try: + limit = int(request.GET.get('limit', max_limit)) + except ValueError: + raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) + + if limit < 0: + raise webob.exc.HTTPBadRequest(_('limit param must be positive')) + + limit = min(max_limit, limit) + start_index = 0 + if marker: + start_index = -1 + for i, item in enumerate(items): + if item['id'] == marker: + start_index = i + 1 + break + if start_index < 0: + raise webob.exc.HTTPBadRequest(_('marker [%s] not found' % marker)) + range_end = start_index + limit + return items[start_index:range_end] + + def get_image_id_from_image_hash(image_service, context, image_hash): """Given an Image ID Hash, return an objectstore Image ID. diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py new file mode 100644 index 000000000..9d98d849a --- /dev/null +++ b/nova/api/openstack/extensions.py @@ -0,0 +1,369 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import imp +import os +import sys +import routes +import webob.dec +import webob.exc + +from nova import flags +from nova import log as logging +from nova import wsgi +from nova.api.openstack import faults + + +LOG = logging.getLogger('extensions') + + +FLAGS = flags.FLAGS + + +class ActionExtensionController(wsgi.Controller): + + def __init__(self, application): + + self.application = application + self.action_handlers = {} + + def add_action(self, action_name, handler): + self.action_handlers[action_name] = handler + + def action(self, req, id): + + input_dict = self._deserialize(req.body, req.get_content_type()) + for action_name, handler in self.action_handlers.iteritems(): + if action_name in input_dict: + return handler(input_dict, req, id) + # no action handler found (bump to downstream application) + res = self.application + return res + + +class ResponseExtensionController(wsgi.Controller): + + def __init__(self, application): + self.application = application + self.handlers = [] + + def add_handler(self, handler): + self.handlers.append(handler) + + def process(self, req, *args, **kwargs): + res = req.get_response(self.application) + content_type = req.best_match_content_type() + # currently response handlers are un-ordered + for handler in self.handlers: + res = handler(res) + try: + body = res.body + headers = res.headers + except AttributeError: + body = self._serialize(res, content_type) + headers = {"Content-Type": content_type} + res = webob.Response() + res.body = body + res.headers = headers + return res + + +class ExtensionController(wsgi.Controller): + + def __init__(self, extension_manager): + self.extension_manager = extension_manager + + def _translate(self, ext): + ext_data = {} + ext_data['name'] = ext.get_name() + ext_data['alias'] = ext.get_alias() + ext_data['description'] = ext.get_description() + ext_data['namespace'] = ext.get_namespace() + ext_data['updated'] = ext.get_updated() + ext_data['links'] = [] # TODO: implement extension 
links + return ext_data + + def index(self, req): + extensions = [] + for alias, ext in self.extension_manager.extensions.iteritems(): + extensions.append(self._translate(ext)) + return dict(extensions=extensions) + + def show(self, req, id): + # NOTE: the extensions alias is used as the 'id' for show + ext = self.extension_manager.extensions[id] + return self._translate(ext) + + def delete(self, req, id): + raise faults.Fault(exc.HTTPNotFound()) + + def create(self, req): + raise faults.Fault(exc.HTTPNotFound()) + + def delete(self, req, id): + raise faults.Fault(exc.HTTPNotFound()) + + +class ExtensionMiddleware(wsgi.Middleware): + """ + Extensions middleware that intercepts configured routes for extensions. + """ + @classmethod + def factory(cls, global_config, **local_config): + """ paste factory """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def _action_ext_controllers(self, application, ext_mgr, mapper): + """ + Return a dict of ActionExtensionController objects by collection + """ + action_controllers = {} + for action in ext_mgr.get_actions(): + if not action.collection in action_controllers.keys(): + controller = ActionExtensionController(application) + mapper.connect("/%s/:(id)/action.:(format)" % + action.collection, + action='action', + controller=controller, + conditions=dict(method=['POST'])) + mapper.connect("/%s/:(id)/action" % action.collection, + action='action', + controller=controller, + conditions=dict(method=['POST'])) + action_controllers[action.collection] = controller + + return action_controllers + + def _response_ext_controllers(self, application, ext_mgr, mapper): + """ + Return a dict of ResponseExtensionController objects by collection + """ + response_ext_controllers = {} + for resp_ext in ext_mgr.get_response_extensions(): + if not resp_ext.key in response_ext_controllers.keys(): + controller = ResponseExtensionController(application) + mapper.connect(resp_ext.url_route + '.:(format)', + 
action='process', + controller=controller, + conditions=resp_ext.conditions) + + mapper.connect(resp_ext.url_route, + action='process', + controller=controller, + conditions=resp_ext.conditions) + response_ext_controllers[resp_ext.key] = controller + + return response_ext_controllers + + def __init__(self, application, ext_mgr=None): + + if ext_mgr is None: + ext_mgr = ExtensionManager(FLAGS.osapi_extensions_path) + self.ext_mgr = ext_mgr + + mapper = routes.Mapper() + + # extended resources + for resource in ext_mgr.get_resources(): + LOG.debug(_('Extended resource: %s'), + resource.collection) + mapper.resource(resource.collection, resource.collection, + controller=resource.controller, + collection=resource.collection_actions, + member=resource.member_actions, + parent_resource=resource.parent) + + # extended actions + action_controllers = self._action_ext_controllers(application, ext_mgr, + mapper) + for action in ext_mgr.get_actions(): + LOG.debug(_('Extended action: %s'), action.action_name) + controller = action_controllers[action.collection] + controller.add_action(action.action_name, action.handler) + + # extended responses + resp_controllers = self._response_ext_controllers(application, ext_mgr, + mapper) + for response_ext in ext_mgr.get_response_extensions(): + LOG.debug(_('Extended response: %s'), response_ext.key) + controller = resp_controllers[response_ext.key] + controller.add_handler(response_ext.handler) + + self._router = routes.middleware.RoutesMiddleware(self._dispatch, + mapper) + + super(ExtensionMiddleware, self).__init__(application) + + @webob.dec.wsgify(RequestClass=wsgi.Request) + def __call__(self, req): + """ + Route the incoming request with router. + """ + req.environ['extended.app'] = self.application + return self._router + + @staticmethod + @webob.dec.wsgify(RequestClass=wsgi.Request) + def _dispatch(req): + """ + Returns the routed WSGI app's response or defers to the extended + application. 
+ """ + match = req.environ['wsgiorg.routing_args'][1] + if not match: + return req.environ['extended.app'] + app = match['controller'] + return app + + +class ExtensionManager(object): + """ + Load extensions from the configured extension path. + See nova/tests/api/openstack/extensions/foxinsocks.py for an example + extension implementation. + """ + + def __init__(self, path): + LOG.audit(_('Initializing extension manager.')) + + self.path = path + self.extensions = {} + self._load_extensions() + + def get_resources(self): + """ + returns a list of ResourceExtension objects + """ + resources = [] + resources.append(ResourceExtension('extensions', + ExtensionController(self))) + for alias, ext in self.extensions.iteritems(): + try: + resources.extend(ext.get_resources()) + except AttributeError: + # NOTE: Extension aren't required to have resource extensions + pass + return resources + + def get_actions(self): + """ + returns a list of ActionExtension objects + """ + actions = [] + for alias, ext in self.extensions.iteritems(): + try: + actions.extend(ext.get_actions()) + except AttributeError: + # NOTE: Extension aren't required to have action extensions + pass + return actions + + def get_response_extensions(self): + """ + returns a list of ResponseExtension objects + """ + response_exts = [] + for alias, ext in self.extensions.iteritems(): + try: + response_exts.extend(ext.get_response_extensions()) + except AttributeError: + # NOTE: Extension aren't required to have response extensions + pass + return response_exts + + def _check_extension(self, extension): + """ + Checks for required methods in extension objects. 
+ """ + try: + LOG.debug(_('Ext name: %s'), extension.get_name()) + LOG.debug(_('Ext alias: %s'), extension.get_alias()) + LOG.debug(_('Ext description: %s'), extension.get_description()) + LOG.debug(_('Ext namespace: %s'), extension.get_namespace()) + LOG.debug(_('Ext updated: %s'), extension.get_updated()) + except AttributeError as ex: + LOG.exception(_("Exception loading extension: %s"), unicode(ex)) + + def _load_extensions(self): + """ + Load extensions from the configured path. The extension name is + constructed from the module_name. If your extension module was named + widgets.py the extension class within that module should be + 'Widgets'. + + See nova/tests/api/openstack/extensions/foxinsocks.py for an example + extension implementation. + """ + if not os.path.exists(self.path): + return + + for f in os.listdir(self.path): + LOG.audit(_('Loading extension file: %s'), f) + mod_name, file_ext = os.path.splitext(os.path.split(f)[-1]) + ext_path = os.path.join(self.path, f) + if file_ext.lower() == '.py': + mod = imp.load_source(mod_name, ext_path) + ext_name = mod_name[0].upper() + mod_name[1:] + try: + new_ext = getattr(mod, ext_name)() + self._check_extension(new_ext) + self.extensions[new_ext.get_alias()] = new_ext + except AttributeError as ex: + LOG.exception(_("Exception loading extension: %s"), + unicode(ex)) + + +class ResponseExtension(object): + """ + ResponseExtension objects can be used to add data to responses from + core nova OpenStack API controllers. + """ + + def __init__(self, method, url_route, handler): + self.url_route = url_route + self.handler = handler + self.conditions = dict(method=[method]) + self.key = "%s-%s" % (method, url_route) + + +class ActionExtension(object): + """ + ActionExtension objects can be used to add custom actions to core nova + nova OpenStack API controllers. 
+ """ + + def __init__(self, collection, action_name, handler): + self.collection = collection + self.action_name = action_name + self.handler = handler + + +class ResourceExtension(object): + """ + ResourceExtension objects can be used to add top level resources + to the OpenStack API in nova. + """ + + def __init__(self, collection, controller, parent=None, + collection_actions={}, member_actions={}): + self.collection = collection + self.controller = controller + self.parent = parent + self.collection_actions = collection_actions + self.member_actions = member_actions diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index c99b945fb..5b99b5a6f 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -15,16 +15,12 @@ # License for the specific language governing permissions and limitations # under the License. -from webob import exc +import webob from nova import db -from nova import context -from nova.api.openstack import faults -from nova.api.openstack import common -from nova.compute import instance_types -from nova.api.openstack.views import flavors as flavors_views +from nova import exception from nova import wsgi -import nova.api.openstack +from nova.api.openstack import views class Controller(wsgi.Controller): @@ -33,33 +29,50 @@ class Controller(wsgi.Controller): _serialization_metadata = { 'application/xml': { "attributes": { - "flavor": ["id", "name", "ram", "disk"]}}} + "flavor": ["id", "name", "ram", "disk"], + "link": ["rel", "type", "href"], + } + } + } def index(self, req): """Return all flavors in brief.""" - return dict(flavors=[dict(id=flavor['id'], name=flavor['name']) - for flavor in self.detail(req)['flavors']]) + items = self._get_flavors(req, is_detail=False) + return dict(flavors=items) def detail(self, req): """Return all flavors in detail.""" - items = [self.show(req, id)['flavor'] for id in self._all_ids(req)] + items = self._get_flavors(req, is_detail=True) return dict(flavors=items) + 
def _get_flavors(self, req, is_detail=True): + """Helper function that returns a list of flavor dicts.""" + ctxt = req.environ['nova.context'] + flavors = db.api.instance_type_get_all(ctxt) + builder = self._get_view_builder(req) + items = [builder.build(flavor, is_detail=is_detail) + for flavor in flavors.values()] + return items + def show(self, req, id): """Return data about the given flavor id.""" - ctxt = req.environ['nova.context'] - flavor = db.api.instance_type_get_by_flavor_id(ctxt, id) - values = { - "id": flavor["flavorid"], - "name": flavor["name"], - "ram": flavor["memory_mb"], - "disk": flavor["local_gb"], - } + try: + ctxt = req.environ['nova.context'] + flavor = db.api.instance_type_get_by_flavor_id(ctxt, id) + except exception.NotFound: + return webob.exc.HTTPNotFound() + + builder = self._get_view_builder(req) + values = builder.build(flavor, is_detail=True) return dict(flavor=values) - def _all_ids(self, req): - """Return the list of all flavorids.""" - ctxt = req.environ['nova.context'] - inst_types = db.api.instance_type_get_all(ctxt) - flavor_ids = [inst_types[i]['flavorid'] for i in inst_types.keys()] - return sorted(flavor_ids) + +class ControllerV10(Controller): + def _get_view_builder(self, req): + return views.flavors.ViewBuilder() + + +class ControllerV11(Controller): + def _get_view_builder(self, req): + base_url = req.application_url + return views.flavors.ViewBuilderV11(base_url) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 99c14275a..79852ecc6 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -15,10 +15,14 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime + from webob import exc from nova import compute +from nova import exception from nova import flags +from nova import log from nova import utils from nova import wsgi import nova.api.openstack @@ -27,6 +31,8 @@ from nova.api.openstack import faults import nova.image.service +LOG = log.getLogger('nova.api.openstack.images') + FLAGS = flags.FLAGS @@ -84,8 +90,6 @@ def _translate_status(item): # S3ImageService pass - return item - def _filter_keys(item, keys): """ @@ -104,6 +108,100 @@ def _convert_image_id_to_hash(image): image['id'] = image_id +def _translate_s3_like_images(image_metadata): + """Work-around for leaky S3ImageService abstraction""" + api_metadata = image_metadata.copy() + _convert_image_id_to_hash(api_metadata) + api_metadata = _translate_keys(api_metadata) + _translate_status(api_metadata) + return api_metadata + + +def _translate_from_image_service_to_api(image_metadata): + """Translate from ImageService to OpenStack API style attribute names + + This involves 4 steps: + + 1. Filter out attributes that the OpenStack API doesn't need + + 2. Translate from base image attributes from names used by + BaseImageService to names used by OpenStack API + + 3. Add in any image properties + + 4. Format values according to API spec (for example dates must + look like "2010-08-10T12:00:00Z") + """ + service_metadata = image_metadata.copy() + properties = service_metadata.pop('properties', {}) + + # 1. Filter out unecessary attributes + api_keys = ['id', 'name', 'updated_at', 'created_at', 'status'] + api_metadata = utils.subset_dict(service_metadata, api_keys) + + # 2. Translate base image attributes + api_map = {'updated_at': 'updated', 'created_at': 'created'} + api_metadata = utils.map_dict_keys(api_metadata, api_map) + + # 3. Add in any image properties + # 3a. 
serverId is used for backups and snapshots + try: + api_metadata['serverId'] = int(properties['instance_id']) + except KeyError: + pass # skip if it's not present + except ValueError: + pass # skip if it's not an integer + + # 3b. Progress special case + # TODO(sirp): ImageService doesn't have a notion of progress yet, so for + # now just fake it + if service_metadata['status'] == 'saving': + api_metadata['progress'] = 0 + + # 4. Format values + # 4a. Format Image Status (API requires uppercase) + api_metadata['status'] = _format_status_for_api(api_metadata['status']) + + # 4b. Format timestamps + for attr in ('created', 'updated'): + if attr in api_metadata: + api_metadata[attr] = _format_datetime_for_api( + api_metadata[attr]) + + return api_metadata + + +def _format_status_for_api(status): + """Return status in a format compliant with OpenStack API""" + mapping = {'queued': 'QUEUED', + 'preparing': 'PREPARING', + 'saving': 'SAVING', + 'active': 'ACTIVE', + 'killed': 'FAILED'} + return mapping[status] + + +def _format_datetime_for_api(datetime_): + """Stringify datetime objects in a format compliant with OpenStack API""" + API_DATETIME_FMT = '%Y-%m-%dT%H:%M:%SZ' + return datetime_.strftime(API_DATETIME_FMT) + + +def _safe_translate(image_metadata): + """Translate attributes for OpenStack API, temporary workaround for + S3ImageService attribute leakage. + """ + # FIXME(sirp): The S3ImageService appears to be leaking implementation + # details, including its internal attribute names, and internal + # `status` values. Working around it for now. 
+ s3_like_image = ('imageId' in image_metadata) + if s3_like_image: + translate = _translate_s3_like_images + else: + translate = _translate_from_image_service_to_api + return translate(image_metadata) + + class Controller(wsgi.Controller): _serialization_metadata = { @@ -117,34 +215,32 @@ class Controller(wsgi.Controller): def index(self, req): """Return all public images in brief""" - items = self._service.index(req.environ['nova.context']) - items = common.limited(items, req) - items = [_filter_keys(item, ('id', 'name')) for item in items] - return dict(images=items) + context = req.environ['nova.context'] + image_metas = self._service.index(context) + image_metas = common.limited(image_metas, req) + return dict(images=image_metas) def detail(self, req): """Return all public images in detail""" - try: - items = self._service.detail(req.environ['nova.context']) - except NotImplementedError: - items = self._service.index(req.environ['nova.context']) - for image in items: - _convert_image_id_to_hash(image) - - items = common.limited(items, req) - items = [_translate_keys(item) for item in items] - items = [_translate_status(item) for item in items] - return dict(images=items) + context = req.environ['nova.context'] + image_metas = self._service.detail(context) + image_metas = common.limited(image_metas, req) + api_image_metas = [_safe_translate(image_meta) + for image_meta in image_metas] + return dict(images=api_image_metas) def show(self, req, id): """Return data about the given image id""" - image_id = common.get_image_id_from_image_hash(self._service, - req.environ['nova.context'], id) + context = req.environ['nova.context'] + try: + image_id = common.get_image_id_from_image_hash( + self._service, context, id) + except exception.NotFound: + raise faults.Fault(exc.HTTPNotFound()) - image = self._service.show(req.environ['nova.context'], image_id) - _convert_image_id_to_hash(image) - self._format_image_dates(image) - return dict(image=image) + image_meta = 
self._service.show(context, image_id) + api_image_meta = _safe_translate(image_meta) + return dict(image=api_image_meta) def delete(self, req, id): # Only public images are supported for now. @@ -155,18 +251,12 @@ class Controller(wsgi.Controller): env = self._deserialize(req.body, req.get_content_type()) instance_id = env["image"]["serverId"] name = env["image"]["name"] - image_meta = compute.API().snapshot( context, instance_id, name) - - return dict(image=image_meta) + api_image_meta = _safe_translate(image_meta) + return dict(image=api_image_meta) def update(self, req, id): # Users may not modify public images, and that's all that # we support for now. raise faults.Fault(exc.HTTPNotFound()) - - def _format_image_dates(self, image): - for attr in ['created_at', 'updated_at', 'deleted_at']: - if image.get(attr) is not None: - image[attr] = image[attr].strftime('%Y-%m-%dT%H:%M:%SZ') diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py new file mode 100644 index 000000000..45bbac99d --- /dev/null +++ b/nova/api/openstack/server_metadata.py @@ -0,0 +1,78 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from webob import exc + +from nova import compute +from nova import wsgi +from nova.api.openstack import faults + + +class Controller(wsgi.Controller): + """ The server metadata API controller for the Openstack API """ + + def __init__(self): + self.compute_api = compute.API() + super(Controller, self).__init__() + + def _get_metadata(self, context, server_id): + metadata = self.compute_api.get_instance_metadata(context, server_id) + meta_dict = {} + for key, value in metadata.iteritems(): + meta_dict[key] = value + return dict(metadata=meta_dict) + + def index(self, req, server_id): + """ Returns the list of metadata for a given instance """ + context = req.environ['nova.context'] + return self._get_metadata(context, server_id) + + def create(self, req, server_id): + context = req.environ['nova.context'] + body = self._deserialize(req.body, req.get_content_type()) + self.compute_api.update_or_create_instance_metadata(context, + server_id, + body['metadata']) + return req.body + + def update(self, req, server_id, id): + context = req.environ['nova.context'] + body = self._deserialize(req.body, req.get_content_type()) + if not id in body: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + if len(body) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + self.compute_api.update_or_create_instance_metadata(context, + server_id, + body) + return req.body + + def show(self, req, server_id, id): + """ Return a single metadata item """ + context = req.environ['nova.context'] + data = self._get_metadata(context, server_id) + if id in data['metadata']: + return {id: data['metadata'][id]} + else: + return faults.Fault(exc.HTTPNotFound()) + + def delete(self, req, server_id, id): + """ Deletes an existing metadata """ + context = req.environ['nova.context'] + self.compute_api.delete_instance_metadata(context, server_id, id) diff --git a/nova/api/openstack/servers.py 
b/nova/api/openstack/servers.py index ff9076003..05027bf1e 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,19 +15,19 @@ import base64 import hashlib -import json import traceback -from xml.dom import minidom from webob import exc +from xml.dom import minidom from nova import compute from nova import context from nova import exception from nova import flags from nova import log as logging -from nova import wsgi +from nova import quota from nova import utils +from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults import nova.api.openstack.views.addresses @@ -36,8 +36,8 @@ import nova.api.openstack.views.servers from nova.auth import manager as auth_manager from nova.compute import instance_types from nova.compute import power_state -from nova.quota import QuotaError import nova.api.openstack +from nova.scheduler import api as scheduler_api LOG = logging.getLogger('server') @@ -86,21 +86,24 @@ class Controller(wsgi.Controller): builder - the response model builder """ instance_list = self.compute_api.get_all(req.environ['nova.context']) - limited_list = common.limited(instance_list, req) + limited_list = self._limit_items(instance_list, req) builder = self._get_view_builder(req) servers = [builder.build(inst, is_detail)['server'] for inst in limited_list] return dict(servers=servers) + @scheduler_api.redirect_handler def show(self, req, id): """ Returns server details by server id """ try: - instance = self.compute_api.get(req.environ['nova.context'], id) + instance = self.compute_api.routing_get( + req.environ['nova.context'], id) builder = self._get_view_builder(req) return builder.build(instance, is_detail=True) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) + @scheduler_api.redirect_handler def delete(self, req, id): """ Destroys a server """ try: @@ -160,8 +163,8 @@ class Controller(wsgi.Controller): key_data=key_data, metadata=metadata, 
injected_files=injected_files) - except QuotaError as error: - self._handle_quota_errors(error) + except quota.QuotaError as error: + self._handle_quota_error(error) inst['instance_type'] = flavor_id inst['image_id'] = requested_image_id @@ -215,7 +218,7 @@ class Controller(wsgi.Controller): injected_files.append((path, contents)) return injected_files - def _handle_quota_errors(self, error): + def _handle_quota_error(self, error): """ Reraise quota errors as api-specific http exceptions """ @@ -231,6 +234,7 @@ class Controller(wsgi.Controller): # if the original error is okay, just reraise it raise error + @scheduler_api.redirect_handler def update(self, req, id): """ Updates the server name or password """ if len(req.body) == 0: @@ -246,7 +250,7 @@ class Controller(wsgi.Controller): update_dict['admin_pass'] = inst_dict['server']['adminPass'] try: self.compute_api.set_admin_password(ctxt, id) - except exception.TimeoutException, e: + except exception.TimeoutException: return exc.HTTPRequestTimeout() if 'name' in inst_dict['server']: update_dict['display_name'] = inst_dict['server']['name'] @@ -256,6 +260,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() + @scheduler_api.redirect_handler def action(self, req, id): """Multi-purpose method used to reboot, rebuild, or resize a server""" @@ -321,6 +326,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def lock(self, req, id): """ lock the instance with id @@ -336,6 +342,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def unlock(self, req, id): """ unlock the instance with id @@ -351,6 +358,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def get_lock(self, req, 
id): """ return the boolean state of (instance with id)'s lock @@ -365,6 +373,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def reset_network(self, req, id): """ Reset networking on an instance (admin only). @@ -379,6 +388,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def inject_network_info(self, req, id): """ Inject network info for an instance (admin only). @@ -393,6 +403,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] @@ -404,6 +415,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def unpause(self, req, id): """ Permit Admins to Unpause the server. 
""" ctxt = req.environ['nova.context'] @@ -415,6 +427,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def suspend(self, req, id): """permit admins to suspend the server""" context = req.environ['nova.context'] @@ -426,6 +439,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def resume(self, req, id): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] @@ -437,6 +451,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def rescue(self, req, id): """Permit users to rescue the server.""" context = req.environ["nova.context"] @@ -448,6 +463,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def unrescue(self, req, id): """Permit users to unrescue the server.""" context = req.environ["nova.context"] @@ -459,6 +475,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def get_ajax_console(self, req, id): """ Returns a url to an instance's ajaxterm console. 
""" try: @@ -468,6 +485,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() + @scheduler_api.redirect_handler def diagnostics(self, req, id): """Permit Admins to retrieve server diagnostics.""" ctxt = req.environ["nova.context"] @@ -537,6 +555,9 @@ class ControllerV10(Controller): def _get_addresses_view_builder(self, req): return nova.api.openstack.views.addresses.ViewBuilderV10(req) + def _limit_items(self, items, req): + return common.limited(items, req) + class ControllerV11(Controller): def _image_id_from_req_data(self, data): @@ -560,6 +581,9 @@ class ControllerV11(Controller): def _get_addresses_view_builder(self, req): return nova.api.openstack.views.addresses.ViewBuilderV11(req) + def _limit_items(self, items, req): + return common.limited_by_marker(items, req) + class ServerCreateRequestXMLDeserializer(object): """ diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py index 18bd779c0..462890ab2 100644 --- a/nova/api/openstack/views/flavors.py +++ b/nova/api/openstack/views/flavors.py @@ -19,16 +19,78 @@ from nova.api.openstack import common class ViewBuilder(object): - def __init__(self): - pass - def build(self, flavor_obj): - raise NotImplementedError() + def build(self, flavor_obj, is_detail=False): + """Generic method used to generate a flavor entity.""" + if is_detail: + flavor = self._build_detail(flavor_obj) + else: + flavor = self._build_simple(flavor_obj) + + self._build_extra(flavor) + + return flavor + + def _build_simple(self, flavor_obj): + """Build a minimal representation of a flavor.""" + return { + "id": flavor_obj["flavorid"], + "name": flavor_obj["name"], + } + + def _build_detail(self, flavor_obj): + """Build a more complete representation of a flavor.""" + simple = self._build_simple(flavor_obj) + + detail = { + "ram": flavor_obj["memory_mb"], + "disk": flavor_obj["local_gb"], + } + + detail.update(simple) + + return detail + + def 
_build_extra(self, flavor_obj): + """Hook for version-specific changes to newly created flavor object.""" + pass class ViewBuilderV11(ViewBuilder): + """Openstack API v1.1 flavors view builder.""" + def __init__(self, base_url): + """ + :param base_url: url of the root wsgi application + """ self.base_url = base_url + def _build_extra(self, flavor_obj): + flavor_obj["links"] = self._build_links(flavor_obj) + + def _build_links(self, flavor_obj): + """Generate a container of links that refer to the provided flavor.""" + href = self.generate_href(flavor_obj["id"]) + + links = [ + { + "rel": "self", + "href": href, + }, + { + "rel": "bookmark", + "type": "application/json", + "href": href, + }, + { + "rel": "bookmark", + "type": "application/xml", + "href": href, + }, + ] + + return links + def generate_href(self, flavor_id): + """Create an url that refers to a specific flavor id.""" return "%s/flavors/%s" % (self.base_url, flavor_id) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 8fe84275a..846cb48a1 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -15,9 +15,10 @@ import common +from nova import db from nova import flags +from nova import log as logging from nova import wsgi -from nova import db from nova.scheduler import api @@ -38,7 +39,8 @@ def _exclude_keys(item, keys): def _scrub_zone(zone): - return _filter_keys(zone, ('id', 'api_url')) + return _exclude_keys(zone, ('username', 'password', 'created_at', + 'deleted', 'deleted_at', 'updated_at')) class Controller(wsgi.Controller): @@ -52,13 +54,9 @@ class Controller(wsgi.Controller): """Return all zones in brief""" # Ask the ZoneManager in the Scheduler for most recent data, # or fall-back to the database ... 
- items = api.API().get_zone_list(req.environ['nova.context']) - if not items: - items = db.zone_get_all(req.environ['nova.context']) - + items = api.get_zone_list(req.environ['nova.context']) items = common.limited(items, req) - items = [_exclude_keys(item, ['username', 'password']) - for item in items] + items = [_scrub_zone(item) for item in items] return dict(zones=items) def detail(self, req): @@ -67,29 +65,37 @@ class Controller(wsgi.Controller): def info(self, req): """Return name and capabilities for this zone.""" - return dict(zone=dict(name=FLAGS.zone_name, - capabilities=FLAGS.zone_capabilities)) + items = api.get_zone_capabilities(req.environ['nova.context']) + + zone = dict(name=FLAGS.zone_name) + caps = FLAGS.zone_capabilities + for cap in caps: + key, value = cap.split('=') + zone[key] = value + for item, (min_value, max_value) in items.iteritems(): + zone[item] = "%s,%s" % (min_value, max_value) + return dict(zone=zone) def show(self, req, id): """Return data about the given zone id""" zone_id = int(id) - zone = db.zone_get(req.environ['nova.context'], zone_id) + zone = api.zone_get(req.environ['nova.context'], zone_id) return dict(zone=_scrub_zone(zone)) def delete(self, req, id): zone_id = int(id) - db.zone_delete(req.environ['nova.context'], zone_id) + api.zone_delete(req.environ['nova.context'], zone_id) return {} def create(self, req): context = req.environ['nova.context'] env = self._deserialize(req.body, req.get_content_type()) - zone = db.zone_create(context, env["zone"]) + zone = api.zone_create(context, env["zone"]) return dict(zone=_scrub_zone(zone)) def update(self, req, id): context = req.environ['nova.context'] env = self._deserialize(req.body, req.get_content_type()) zone_id = int(id) - zone = db.zone_update(context, zone_id, env["zone"]) + zone = api.zone_update(context, zone_id, env["zone"]) return dict(zone=_scrub_zone(zone)) diff --git a/nova/compute/api.py b/nova/compute/api.py index 01eead4ac..266cbe677 100644 --- 
a/nova/compute/api.py +++ b/nova/compute/api.py @@ -34,6 +34,7 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import instance_types +from nova.scheduler import api as scheduler_api from nova.db import base FLAGS = flags.FLAGS @@ -352,6 +353,7 @@ class API(base.Base): rv = self.db.instance_update(context, instance_id, kwargs) return dict(rv.iteritems()) + @scheduler_api.reroute_compute("delete") def delete(self, context, instance_id): LOG.debug(_("Going to try to terminate %s"), instance_id) try: @@ -384,24 +386,37 @@ class API(base.Base): rv = self.db.instance_get(context, instance_id) return dict(rv.iteritems()) + @scheduler_api.reroute_compute("get") + def routing_get(self, context, instance_id): + """Use this method instead of get() if this is the only + operation you intend to to. It will route to novaclient.get + if the instance is not found.""" + return self.get(context, instance_id) + def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): """Get all instances, possibly filtered by one of the given parameters. If there is no filter and the context is - an admin, it will retreive all instances in the system.""" + an admin, it will retreive all instances in the system. 
+ """ if reservation_id is not None: - return self.db.instance_get_all_by_reservation(context, - reservation_id) + return self.db.instance_get_all_by_reservation( + context, reservation_id) + if fixed_ip is not None: return self.db.fixed_ip_get_instance(context, fixed_ip) + if project_id or not context.is_admin: if not context.project: - return self.db.instance_get_all_by_user(context, - context.user_id) + return self.db.instance_get_all_by_user( + context, context.user_id) + if project_id is None: project_id = context.project_id - return self.db.instance_get_all_by_project(context, - project_id) + + return self.db.instance_get_all_by_project( + context, project_id) + return self.db.instance_get_all(context) def _cast_compute_message(self, method, context, instance_id, host=None, @@ -451,12 +466,15 @@ class API(base.Base): :retval: A dict containing image metadata """ - data = {'name': name, 'is_public': False} - image_meta = self.image_service.create(context, data) - params = {'image_id': image_meta['id']} + properties = {'instance_id': str(instance_id), + 'user_id': str(context.user_id)} + sent_meta = {'name': name, 'is_public': False, + 'properties': properties} + recv_meta = self.image_service.create(context, sent_meta) + params = {'image_id': recv_meta['id']} self._cast_compute_message('snapshot_instance', context, instance_id, params=params) - return image_meta + return recv_meta def reboot(self, context, instance_id): """Reboot the given instance.""" @@ -527,14 +545,17 @@ class API(base.Base): "instance_id": instance_id, "flavor_id": flavor_id}}) + @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): """Pause the given instance.""" self._cast_compute_message('pause_instance', context, instance_id) + @scheduler_api.reroute_compute("unpause") def unpause(self, context, instance_id): """Unpause the given instance.""" self._cast_compute_message('unpause_instance', context, instance_id) + @scheduler_api.reroute_compute("diagnostics") def 
get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" return self._call_compute_message( @@ -546,18 +567,22 @@ class API(base.Base): """Retrieve actions for the given instance.""" return self.db.instance_get_actions(context, instance_id) + @scheduler_api.reroute_compute("suspend") def suspend(self, context, instance_id): """suspend the instance with instance_id""" self._cast_compute_message('suspend_instance', context, instance_id) + @scheduler_api.reroute_compute("resume") def resume(self, context, instance_id): """resume the instance with instance_id""" self._cast_compute_message('resume_instance', context, instance_id) + @scheduler_api.reroute_compute("rescue") def rescue(self, context, instance_id): """Rescue the given instance.""" self._cast_compute_message('rescue_instance', context, instance_id) + @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): """Unrescue the given instance.""" self._cast_compute_message('unrescue_instance', context, instance_id) @@ -573,7 +598,6 @@ class API(base.Base): def get_ajax_console(self, context, instance_id): """Get a url to an AJAX Console""" - instance = self.get(context, instance_id) output = self._call_compute_message('get_ajax_console', context, instance_id) @@ -621,7 +645,7 @@ class API(base.Base): if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. 
" "Example device: /dev/vdb") % device) - self.volume_api.check_attach(context, volume_id) + self.volume_api.check_attach(context, volume_id=volume_id) instance = self.get(context, instance_id) host = instance['host'] rpc.cast(context, @@ -635,7 +659,7 @@ class API(base.Base): instance = self.db.volume_get_instance(context.elevated(), volume_id) if not instance: raise exception.ApiError(_("Volume isn't attached to anything!")) - self.volume_api.check_detach(context, volume_id) + self.volume_api.check_detach(context, volume_id=volume_id) host = instance['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -646,5 +670,21 @@ class API(base.Base): def associate_floating_ip(self, context, instance_id, address): instance = self.get(context, instance_id) - self.network_api.associate_floating_ip(context, address, - instance['fixed_ip']) + self.network_api.associate_floating_ip(context, + floating_ip=address, + fixed_ip=instance['fixed_ip']) + + def get_instance_metadata(self, context, instance_id): + """Get all metadata associated with an instance.""" + rv = self.db.instance_metadata_get(context, instance_id) + return dict(rv.iteritems()) + + def delete_instance_metadata(self, context, instance_id, key): + """Delete the given metadata item""" + self.db.instance_metadata_delete(context, instance_id, key) + + def update_or_create_instance_metadata(self, context, instance_id, + metadata): + """Updates or creates instance metadata""" + self.db.instance_metadata_update_or_create(context, instance_id, + metadata) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7316d1510..468771f46 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -111,7 +111,7 @@ def checks_instance_lock(function): return decorated_function -class ComputeManager(manager.Manager): +class ComputeManager(manager.SchedulerDependentManager): """Manages the running instances from creation to destruction.""" @@ -132,7 +132,8 @@ class 
ComputeManager(manager.Manager): self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) - super(ComputeManager, self).__init__(*args, **kwargs) + super(ComputeManager, self).__init__(service_name="compute", + *args, **kwargs) def init_host(self): """Do any initialization that needs to be run if this is a diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py new file mode 100644 index 000000000..521da289f --- /dev/null +++ b/nova/console/vmrc.py @@ -0,0 +1,144 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +VMRC console drivers. 
+""" + +import base64 +import json + +from nova import exception +from nova import flags +from nova import log as logging +from nova.virt.vmwareapi import vim_util + +flags.DEFINE_integer('console_vmrc_port', + 443, + "port for VMware VMRC connections") +flags.DEFINE_integer('console_vmrc_error_retries', + 10, + "number of retries for retrieving VMRC information") + +FLAGS = flags.FLAGS + + +class VMRCConsole(object): + """VMRC console driver with ESX credentials.""" + + def __init__(self): + super(VMRCConsole, self).__init__() + + @property + def console_type(self): + return 'vmrc+credentials' + + def get_port(self, context): + """Get available port for consoles.""" + return FLAGS.console_vmrc_port + + def setup_console(self, context, console): + """Sets up console.""" + pass + + def teardown_console(self, context, console): + """Tears down console.""" + pass + + def init_host(self): + """Perform console initialization.""" + pass + + def fix_pool_password(self, password): + """Encode password.""" + # TODO(sateesh): Encrypt pool password + return password + + def generate_password(self, vim_session, pool, instance_name): + """ + Returns VMRC Connection credentials. + + Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'. 
+ """ + username, password = pool['username'], pool['password'] + vms = vim_session._call_method(vim_util, "get_objects", + "VirtualMachine", ["name", "config.files.vmPathName"]) + vm_ds_path_name = None + vm_ref = None + for vm in vms: + vm_name = None + ds_path_name = None + for prop in vm.propSet: + if prop.name == "name": + vm_name = prop.val + elif prop.name == "config.files.vmPathName": + ds_path_name = prop.val + if vm_name == instance_name: + vm_ref = vm.obj + vm_ds_path_name = ds_path_name + break + if vm_ref is None: + raise exception.NotFound(_("instance - %s not present") % + instance_name) + json_data = json.dumps({"vm_id": vm_ds_path_name, + "username": username, + "password": password}) + return base64.b64encode(json_data) + + def is_otp(self): + """Is one time password or not.""" + return False + + +class VMRCSessionConsole(VMRCConsole): + """VMRC console driver with VMRC One Time Sessions.""" + + def __init__(self): + super(VMRCSessionConsole, self).__init__() + + @property + def console_type(self): + return 'vmrc+session' + + def generate_password(self, vim_session, pool, instance_name): + """ + Returns a VMRC Session. + + Return string is of the form '<VM MOID>:<VMRC Ticket>'. 
+ """ + vms = vim_session._call_method(vim_util, "get_objects", + "VirtualMachine", ["name"]) + vm_ref = None + for vm in vms: + if vm.propSet[0].val == instance_name: + vm_ref = vm.obj + if vm_ref is None: + raise exception.NotFound(_("instance - %s not present") % + instance_name) + virtual_machine_ticket = \ + vim_session._call_method( + vim_session._get_vim(), + "AcquireCloneTicket", + vim_session._get_vim().get_service_content().sessionManager) + json_data = json.dumps({"vm_id": str(vm_ref.value), + "username": virtual_machine_ticket, + "password": virtual_machine_ticket}) + return base64.b64encode(json_data) + + def is_otp(self): + """Is one time password or not.""" + return True diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py new file mode 100644 index 000000000..09beac7a0 --- /dev/null +++ b/nova/console/vmrc_manager.py @@ -0,0 +1,158 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +VMRC Console Manager. 
+""" + +from nova import exception +from nova import flags +from nova import log as logging +from nova import manager +from nova import rpc +from nova import utils +from nova.virt.vmwareapi_conn import VMWareAPISession + +LOG = logging.getLogger("nova.console.vmrc_manager") + +FLAGS = flags.FLAGS +flags.DEFINE_string('console_public_hostname', + '', + 'Publicly visible name for this console host') +flags.DEFINE_string('console_driver', + 'nova.console.vmrc.VMRCConsole', + 'Driver to use for the console') + + +class ConsoleVMRCManager(manager.Manager): + + """ + Manager to handle VMRC connections needed for accessing instance consoles. + """ + + def __init__(self, console_driver=None, *args, **kwargs): + self.driver = utils.import_object(FLAGS.console_driver) + super(ConsoleVMRCManager, self).__init__(*args, **kwargs) + + def init_host(self): + self.sessions = {} + self.driver.init_host() + + def _get_vim_session(self, pool): + """Get VIM session for the pool specified.""" + vim_session = None + if pool['id'] not in self.sessions.keys(): + vim_session = VMWareAPISession(pool['address'], + pool['username'], + pool['password'], + FLAGS.console_vmrc_error_retries) + self.sessions[pool['id']] = vim_session + return self.sessions[pool['id']] + + def _generate_console(self, context, pool, name, instance_id, instance): + """Sets up console for the instance.""" + LOG.debug(_("Adding console")) + + password = self.driver.generate_password( + self._get_vim_session(pool), + pool, + instance.name) + + console_data = {'instance_name': name, + 'instance_id': instance_id, + 'password': password, + 'pool_id': pool['id']} + console_data['port'] = self.driver.get_port(context) + console = self.db.console_create(context, console_data) + self.driver.setup_console(context, console) + return console + + @exception.wrap_exception + def add_console(self, context, instance_id, password=None, + port=None, **kwargs): + """ + Adds a console for the instance. 
If it is one time password, then we + generate new console credentials. + """ + instance = self.db.instance_get(context, instance_id) + host = instance['host'] + name = instance['name'] + pool = self.get_pool_for_instance_host(context, host) + try: + console = self.db.console_get_by_pool_instance(context, + pool['id'], + instance_id) + if self.driver.is_otp(): + console = self._generate_console( + context, + pool, + name, + instance_id, + instance) + except exception.NotFound: + console = self._generate_console( + context, + pool, + name, + instance_id, + instance) + return console['id'] + + @exception.wrap_exception + def remove_console(self, context, console_id, **_kwargs): + """Removes a console entry.""" + try: + console = self.db.console_get(context, console_id) + except exception.NotFound: + LOG.debug(_("Tried to remove non-existent console " + "%(console_id)s.") % + {'console_id': console_id}) + return + LOG.debug(_("Removing console " + "%(console_id)s.") % + {'console_id': console_id}) + self.db.console_delete(context, console_id) + self.driver.teardown_console(context, console) + + def get_pool_for_instance_host(self, context, instance_host): + """Gets console pool info for the instance.""" + context = context.elevated() + console_type = self.driver.console_type + try: + pool = self.db.console_pool_get_by_host_type(context, + instance_host, + self.host, + console_type) + except exception.NotFound: + pool_info = rpc.call(context, + self.db.queue_get_for(context, + FLAGS.compute_topic, + instance_host), + {"method": "get_console_pool_info", + "args": {"console_type": console_type}}) + pool_info['password'] = self.driver.fix_pool_password( + pool_info['password']) + pool_info['host'] = self.host + # ESX Address or Proxy Address + public_host_name = pool_info['address'] + if FLAGS.console_public_hostname: + public_host_name = FLAGS.console_public_hostname + pool_info['public_hostname'] = public_host_name + pool_info['console_type'] = console_type + 
pool_info['compute_host'] = instance_host + pool = self.db.console_pool_create(context, pool_info) + return pool diff --git a/nova/db/api.py b/nova/db/api.py index afc1bff2f..fd3c63b76 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -71,6 +71,7 @@ class NoMoreTargets(exception.Error): """No more available blades""" pass + ################### @@ -89,7 +90,7 @@ def service_get_by_host_and_topic(context, host, topic): return IMPL.service_get_by_host_and_topic(context, host, topic) -def service_get_all(context, disabled=False): +def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) @@ -1171,3 +1172,21 @@ def zone_get(context, zone_id): def zone_get_all(context): """Get all child Zones.""" return IMPL.zone_get_all(context) + + +#################### + + +def instance_metadata_get(context, instance_id): + """Get all metadata for an instance""" + return IMPL.instance_metadata_get(context, instance_id) + + +def instance_metadata_delete(context, instance_id, key): + """Delete the given metadata item""" + IMPL.instance_metadata_delete(context, instance_id, key) + + +def instance_metadata_update_or_create(context, instance_id, metadata): + """Create or update instance metadata""" + IMPL.instance_metadata_update_or_create(context, instance_id, metadata) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d7b5aff46..b2a13a01b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -143,12 +143,15 @@ def service_get(context, service_id, session=None): @require_admin_context -def service_get_all(context, disabled=False): +def service_get_all(context, disabled=None): session = get_session() - return session.query(models.Service).\ - filter_by(deleted=can_read_deleted(context)).\ - filter_by(disabled=disabled).\ - all() + query = session.query(models.Service).\ + filter_by(deleted=can_read_deleted(context)) + + if disabled is not None: + query = query.filter_by(disabled=disabled) 
+ + return query.all() @require_admin_context @@ -2209,7 +2212,7 @@ def migration_get(context, id, session=None): filter_by(id=id).first() if not result: raise exception.NotFound(_("No migration found with id %s") - % migration_id) + % id) return result @@ -2386,7 +2389,7 @@ def instance_type_get_by_flavor_id(context, id): filter_by(flavorid=int(id)).\ first() if not inst_type: - raise exception.NotFound(_("No flavor with name %s") % id) + raise exception.NotFound(_("No flavor with flavorid %s") % id) else: return dict(inst_type) @@ -2432,6 +2435,7 @@ def zone_create(context, values): @require_admin_context def zone_update(context, zone_id, values): + session = get_session() zone = session.query(models.Zone).filter_by(id=zone_id).first() if not zone: raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) @@ -2462,3 +2466,65 @@ def zone_get(context, zone_id): def zone_get_all(context): session = get_session() return session.query(models.Zone).all() + + +#################### + +@require_context +def instance_metadata_get(context, instance_id): + session = get_session() + + meta_results = session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + all() + + meta_dict = {} + for i in meta_results: + meta_dict[i['key']] = i['value'] + return meta_dict + + +@require_context +def instance_metadata_delete(context, instance_id, key): + session = get_session() + session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def instance_metadata_get_item(context, instance_id, key): + session = get_session() + + meta_result = session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + first() + + if not meta_result: + raise 
exception.NotFound(_('Invalid metadata key for instance %s') % + instance_id) + return meta_result + + +@require_context +def instance_metadata_update_or_create(context, instance_id, metadata): + session = get_session() + meta_ref = None + for key, value in metadata.iteritems(): + try: + meta_ref = instance_metadata_get_item(context, instance_id, key, + session) + except: + meta_ref = models.InstanceMetadata() + meta_ref.update({"key": key, "value": value, + "instance_id": instance_id, + "deleted": 0}) + meta_ref.save(session=session) + return metadata diff --git a/nova/flags.py b/nova/flags.py index 9123e9ac7..f011ab383 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -298,10 +298,14 @@ DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server') DEFINE_integer('ec2_port', 8773, 'cloud controller port') DEFINE_string('ec2_scheme', 'http', 'prefix for ec2') DEFINE_string('ec2_path', '/services/Cloud', 'suffix for ec2') +DEFINE_string('osapi_extensions_path', '/var/lib/nova/extensions', + 'default directory for nova extensions') DEFINE_string('osapi_host', '$my_ip', 'ip of api server') DEFINE_string('osapi_scheme', 'http', 'prefix for openstack') DEFINE_integer('osapi_port', 8774, 'OpenStack API port') DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack') +DEFINE_integer('osapi_max_limit', 1000, + 'max number of items returned in a collection response') DEFINE_string('default_project', 'openstack', 'default project for openstack') DEFINE_string('default_image', 'ami-11111', @@ -358,5 +362,6 @@ DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') DEFINE_string('zone_name', 'nova', 'name of this zone') -DEFINE_string('zone_capabilities', 'kypervisor:xenserver;os:linux', - 'Key/Value tags which represent capabilities of this zone') +DEFINE_list('zone_capabilities', + ['hypervisor=xenserver;kvm', 'os=linux;windows'], + 'Key/Multi-value list representng capabilities of this zone') diff --git a/nova/image/glance.py 
b/nova/image/glance.py index 171b28fde..be9805b69 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -39,7 +39,17 @@ GlanceClient = utils.import_class('glance.client.Client') class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" + GLANCE_ONLY_ATTRS = ["size", "location", "disk_format", + "container_format"] + + # NOTE(sirp): Overriding to use _translate_to_service provided by + # BaseImageService + SERVICE_IMAGE_ATTRS = service.BaseImageService.BASE_IMAGE_ATTRS +\ + GLANCE_ONLY_ATTRS + def __init__(self, client=None): + # FIXME(sirp): can we avoid dependency-injection here by using + # stubbing out a fake? if client is None: self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) else: @@ -49,39 +59,43 @@ class GlanceImageService(service.BaseImageService): """ Calls out to Glance for a list of images available """ - return self.client.get_images() + # NOTE(sirp): We need to use `get_images_detailed` and not + # `get_images` here because we need `is_public` and `properties` + # included so we can filter by user + filtered = [] + image_metas = self.client.get_images_detailed() + for image_meta in image_metas: + if self._is_image_available(context, image_meta): + meta_subset = utils.subset_dict(image_meta, ('id', 'name')) + filtered.append(meta_subset) + return filtered def detail(self, context): """ Calls out to Glance for a list of detailed image information """ - return [self._convert_timestamps_to_datetimes(image) - for image in self.client.get_images_detailed()] + filtered = [] + image_metas = self.client.get_images_detailed() + for image_meta in image_metas: + if self._is_image_available(context, image_meta): + base_image_meta = self._translate_to_base(image_meta) + filtered.append(base_image_meta) + return filtered def show(self, context, image_id): """ Returns a dict containing image data for the given opaque image id. 
""" try: - image = self.client.get_image_meta(image_id) + image_meta = self.client.get_image_meta(image_id) except glance_exception.NotFound: raise exception.NotFound - return self._convert_timestamps_to_datetimes(image) - def _convert_timestamps_to_datetimes(self, image): - """ - Returns image with known timestamp fields converted to datetime objects - """ - for attr in ['created_at', 'updated_at', 'deleted_at']: - if image.get(attr) is not None: - image[attr] = self._parse_glance_iso8601_timestamp(image[attr]) - return image + if not self._is_image_available(context, image_meta): + raise exception.NotFound - def _parse_glance_iso8601_timestamp(self, timestamp): - """ - Parse a subset of iso8601 timestamps into datetime objects - """ - return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f") + base_image_meta = self._translate_to_base(image_meta) + return base_image_meta def show_by_name(self, context, name): """ @@ -89,56 +103,67 @@ class GlanceImageService(service.BaseImageService): """ # TODO(vish): replace this with more efficient call when glance # supports it. - images = self.detail(context) - image = None - for cantidate in images: - if name == cantidate.get('name'): - image = cantidate - break - if image is None: - raise exception.NotFound - return image + image_metas = self.detail(context) + for image_meta in image_metas: + if name == image_meta.get('name'): + return image_meta + raise exception.NotFound def get(self, context, image_id, data): """ Calls out to Glance for metadata and data and writes data. 
""" try: - metadata, image_chunks = self.client.get_image(image_id) + image_meta, image_chunks = self.client.get_image(image_id) except glance_exception.NotFound: raise exception.NotFound + for chunk in image_chunks: data.write(chunk) - return self._convert_timestamps_to_datetimes(metadata) - def create(self, context, metadata, data=None): + base_image_meta = self._translate_to_base(image_meta) + return base_image_meta + + def create(self, context, image_meta, data=None): """ Store the image data and return the new image id. :raises AlreadyExists if the image already exist. - """ - return self._convert_timestamps_to_datetimes( - self.client.add_image(metadata, data)) + # Translate Base -> Service + LOG.debug(_("Creating image in Glance. Metadata passed in %s"), + image_meta) + sent_service_image_meta = self._translate_to_service(image_meta) + LOG.debug(_("Metadata after formatting for Glance %s"), + sent_service_image_meta) + + recv_service_image_meta = self.client.add_image( + sent_service_image_meta, data) + + # Translate Service -> Base + base_image_meta = self._translate_to_base(recv_service_image_meta) + LOG.debug(_("Metadata returned from Glance formatted for Base %s"), + base_image_meta) + return base_image_meta - def update(self, context, image_id, metadata, data=None): + def update(self, context, image_id, image_meta, data=None): """Replace the contents of the given image with the new data. :raises NotFound if the image does not exist. - """ try: - metadata = self.client.update_image(image_id, metadata, data) + image_meta = self.client.update_image(image_id, image_meta, data) except glance_exception.NotFound: raise exception.NotFound - return self._convert_timestamps_to_datetimes(metadata) + + base_image_meta = self._translate_to_base(image_meta) + return base_image_meta def delete(self, context, image_id): """ Delete the given image. :raises NotFound if the image does not exist. 
- """ try: result = self.client.delete_image(image_id) @@ -151,3 +176,62 @@ class GlanceImageService(service.BaseImageService): Clears out all images """ pass + + @classmethod + def _translate_to_base(cls, image_meta): + """Overriding the base translation to handle conversion to datetime + objects + """ + image_meta = service.BaseImageService._translate_to_base(image_meta) + image_meta = _convert_timestamps_to_datetimes(image_meta) + return image_meta + + @staticmethod + def _is_image_available(context, image_meta): + """ + Images are always available if they are public or if the user is an + admin. + + Otherwise, we filter by project_id (if present) and then fall-back to + images owned by user. + """ + # FIXME(sirp): We should be filtering by user_id on the Glance side + # for security; however, we can't do that until we get authn/authz + # sorted out. Until then, filtering in Nova. + if image_meta['is_public'] or context.is_admin: + return True + + properties = image_meta['properties'] + + if context.project_id and ('project_id' in properties): + return str(properties['project_id']) == str(project_id) + + try: + user_id = properties['user_id'] + except KeyError: + return False + + return str(user_id) == str(context.user_id) + + +# utility functions +def _convert_timestamps_to_datetimes(image_meta): + """ + Returns image with known timestamp fields converted to datetime objects + """ + for attr in ['created_at', 'updated_at', 'deleted_at']: + if image_meta.get(attr) is not None: + image_meta[attr] = _parse_glance_iso8601_timestamp( + image_meta[attr]) + return image_meta + + +def _parse_glance_iso8601_timestamp(timestamp): + """ + Parse a subset of iso8601 timestamps into datetime objects + """ + GLANCE_FMT = "%Y-%m-%dT%H:%M:%S" + ISO_FMT = "%Y-%m-%dT%H:%M:%S.%f" + # FIXME(sirp): Glance is not returning in ISO format, we should fix Glance + # to do so, and then switch to parsing it here + return datetime.datetime.strptime(timestamp, GLANCE_FMT) diff --git 
a/nova/image/local.py b/nova/image/local.py index 609d6c42a..1fb6e1f13 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -24,6 +24,7 @@ from nova import exception from nova import flags from nova import log as logging from nova.image import service +from nova import utils FLAGS = flags.FLAGS @@ -63,8 +64,12 @@ class LocalImageService(service.BaseImageService): return images def index(self, context): - return [dict(image_id=i['id'], name=i.get('name')) - for i in self.detail(context)] + filtered = [] + image_metas = self.detail(context) + for image_meta in image_metas: + meta = utils.subset_dict(image_meta, ('id', 'name')) + filtered.append(meta) + return filtered def detail(self, context): images = [] diff --git a/nova/image/service.py b/nova/image/service.py index e907381c9..b9897ecae 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -16,9 +16,33 @@ # under the License. +from nova import utils + + class BaseImageService(object): + """Base class for providing image search and retrieval services + + ImageService exposes two concepts of metadata: + + 1. First-class attributes: This is metadata that is common to all + ImageService subclasses and is shared across all hypervisors. These + attributes are defined by IMAGE_ATTRS. + + 2. Properties: This is metdata that is specific to an ImageService, + and Image, or a particular hypervisor. Any attribute not present in + BASE_IMAGE_ATTRS should be considered an image property. + + This means that ImageServices will return BASE_IMAGE_ATTRS as keys in the + metadata dict, all other attributes will be returned as keys in the nested + 'properties' dict. 
+ """ + BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at', + 'deleted_at', 'deleted', 'status', 'is_public'] - """Base class for providing image search and retrieval services""" + # NOTE(sirp): ImageService subclasses may override this to aid translation + # between BaseImageService attributes and additional metadata stored by + # the ImageService subclass + SERVICE_IMAGE_ATTRS = [] def index(self, context): """ @@ -111,3 +135,38 @@ class BaseImageService(object): """ raise NotImplementedError + + @classmethod + def _translate_to_base(cls, metadata): + """Return a metadata dictionary that is BaseImageService compliant. + + This is used by subclasses to expose only a metadata dictionary that + is the same across ImageService implementations. + """ + return cls._propertify_metadata(metadata, cls.BASE_IMAGE_ATTRS) + + @classmethod + def _translate_to_service(cls, metadata): + """Return a metadata dictionary that is usable by the ImageService + subclass. + + As an example, Glance has additional attributes (like 'location'); the + BaseImageService considers these properties, but we need to translate + these back to first-class attrs for sending to Glance. This method + handles this by allowing you to specify the attributes an ImageService + considers first-class. + """ + if not cls.SERVICE_IMAGE_ATTRS: + raise NotImplementedError(_("Cannot use this without specifying " + "SERVICE_IMAGE_ATTRS for subclass")) + return cls._propertify_metadata(metadata, cls.SERVICE_IMAGE_ATTRS) + + @staticmethod + def _propertify_metadata(metadata, keys): + """Return a dict with any unrecognized keys placed in the nested + 'properties' dict. 
+ """ + flattened = utils.flatten_dict(metadata) + attributes, properties = utils.partition_dict(flattened, keys) + attributes['properties'] = properties + return attributes diff --git a/nova/manager.py b/nova/manager.py index 3d38504bd..804a50479 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -53,11 +53,14 @@ This module provides Manager, a base class for managers. from nova import utils from nova import flags +from nova import log as logging from nova.db import base - +from nova.scheduler import api FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.manager') + class Manager(base.Base): def __init__(self, host=None, db_driver=None): @@ -74,3 +77,29 @@ class Manager(base.Base): """Do any initialization that needs to be run if this is a standalone service. Child classes should override this method.""" pass + + +class SchedulerDependentManager(Manager): + """Periodically send capability updates to the Scheduler services. + Services that need to update the Scheduler of their capabilities + should derive from this class. Otherwise they can derive from + manager.Manager directly. 
Updates are only sent after + update_service_capabilities is called with non-None values.""" + + def __init__(self, host=None, db_driver=None, service_name="undefined"): + self.last_capabilities = None + self.service_name = service_name + super(SchedulerDependentManager, self).__init__(host, db_driver) + + def update_service_capabilities(self, capabilities): + """Remember these capabilities to send on next periodic update.""" + self.last_capabilities = capabilities + + def periodic_tasks(self, context=None): + """Pass data back to the scheduler at a periodic interval""" + if self.last_capabilities: + LOG.debug(_("Notifying Schedulers of capabilities ...")) + api.update_service_capabilities(context, self.service_name, + self.host, self.last_capabilities) + + super(SchedulerDependentManager, self).periodic_tasks(context) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 796d6ba31..06b05366a 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -1,3 +1,5 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
@@ -210,10 +212,7 @@ class IptablesManager(object): """ def __init__(self, execute=None): if not execute: - if FLAGS.fake_network: - self.execute = lambda *args, **kwargs: ('', '') - else: - self.execute = utils.execute + self.execute = _execute else: self.execute = execute @@ -352,9 +351,6 @@ class IptablesManager(object): return new_filter -iptables_manager = IptablesManager() - - def metadata_forward(): """Create forwarding rule for metadata""" iptables_manager.ipv4['nat'].add_rule("PREROUTING", @@ -767,3 +763,6 @@ def _ip_bridge_cmd(action, params, device): cmd.extend(params) cmd.extend(['dev', device]) return cmd + + +iptables_manager = IptablesManager() diff --git a/nova/network/manager.py b/nova/network/manager.py index 34fc042e4..d994f7dc8 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -105,7 +105,7 @@ class AddressAlreadyAllocated(exception.Error): pass -class NetworkManager(manager.Manager): +class NetworkManager(manager.SchedulerDependentManager): """Implements common network manager functionality. This class must be subclassed to support specific topologies. @@ -116,7 +116,8 @@ class NetworkManager(manager.Manager): if not network_driver: network_driver = FLAGS.network_driver self.driver = utils.import_object(network_driver) - super(NetworkManager, self).__init__(*args, **kwargs) + super(NetworkManager, self).__init__(service_name='network', + *args, **kwargs) def init_host(self): """Do any initialization that needs to be run if this is a diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py new file mode 100644 index 000000000..93e6584f0 --- /dev/null +++ b/nova/network/vmwareapi_net.py @@ -0,0 +1,91 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Implements vlans for vmwareapi. +""" + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils +from nova.virt.vmwareapi_conn import VMWareAPISession +from nova.virt.vmwareapi import network_utils + +LOG = logging.getLogger("nova.network.vmwareapi_net") + +FLAGS = flags.FLAGS +flags.DEFINE_string('vlan_interface', 'vmnic0', + 'Physical network adapter name in VMware ESX host for ' + 'vlan networking') + + +def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): + """Create a vlan and bridge unless they already exist.""" + # Open vmwareapi session + host_ip = FLAGS.vmwareapi_host_ip + host_username = FLAGS.vmwareapi_host_username + host_password = FLAGS.vmwareapi_host_password + if not host_ip or host_username is None or host_password is None: + raise Exception(_("Must specify vmwareapi_host_ip," + "vmwareapi_host_username " + "and vmwareapi_host_password to use" + "connection_type=vmwareapi")) + session = VMWareAPISession(host_ip, host_username, host_password, + FLAGS.vmwareapi_api_retry_count) + vlan_interface = FLAGS.vlan_interface + # Check if the vlan_interface physical network adapter exists on the host + if not network_utils.check_if_vlan_interface_exists(session, + vlan_interface): + raise exception.NotFound(_("There is no physical network adapter with " + "the name %s on the ESX host") % vlan_interface) + + # Get the vSwitch associated with the Physical Adapter + vswitch_associated = network_utils.get_vswitch_for_vlan_interface( + session, vlan_interface) + if 
vswitch_associated is None: + raise exception.NotFound(_("There is no virtual switch associated " + "with the physical network adapter with name %s") % + vlan_interface) + # Check whether bridge already exists and retrieve the the ref of the + # network whose name_label is "bridge" + network_ref = network_utils.get_network_with_the_name(session, bridge) + if network_ref is None: + # Create a port group on the vSwitch associated with the vlan_interface + # corresponding physical network adapter on the ESX host + network_utils.create_port_group(session, bridge, vswitch_associated, + vlan_num) + else: + # Get the vlan id and vswitch corresponding to the port group + pg_vlanid, pg_vswitch = \ + network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge) + + # Check if the vsiwtch associated is proper + if pg_vswitch != vswitch_associated: + raise exception.Invalid(_("vSwitch which contains the port group " + "%(bridge)s is not associated with the desired " + "physical adapter. Expected vSwitch is " + "%(vswitch_associated)s, but the one associated" + " is %(pg_vswitch)s") % locals()) + + # Check if the vlan id is proper for the port group + if pg_vlanid != vlan_num: + raise exception.Invalid(_("VLAN tag is not appropriate for the " + "port group %(bridge)s. Expected VLAN tag is " + "%(vlan_num)s, but the one associated with the " + "port group is %(pg_vlanid)s") % locals()) diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py deleted file mode 100644 index b213e18e8..000000000 --- a/nova/objectstore/bucket.py +++ /dev/null @@ -1,181 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Simple object store using Blobs and JSON files on disk. -""" - -import bisect -import datetime -import glob -import json -import os - -from nova import exception -from nova import flags -from nova import utils -from nova.objectstore import stored - - -FLAGS = flags.FLAGS -flags.DEFINE_string('buckets_path', '$state_path/buckets', - 'path to s3 buckets') - - -class Bucket(object): - def __init__(self, name): - self.name = name - self.path = os.path.abspath(os.path.join(FLAGS.buckets_path, name)) - if not self.path.startswith(os.path.abspath(FLAGS.buckets_path)) or \ - not os.path.isdir(self.path): - raise exception.NotFound() - - self.ctime = os.path.getctime(self.path) - - def __repr__(self): - return "<Bucket: %s>" % self.name - - @staticmethod - def all(): - """ list of all buckets """ - buckets = [] - for fn in glob.glob("%s/*.json" % FLAGS.buckets_path): - try: - json.load(open(fn)) - name = os.path.split(fn)[-1][:-5] - buckets.append(Bucket(name)) - except: - pass - - return buckets - - @staticmethod - def create(bucket_name, context): - """Create a new bucket owned by a project. - - @bucket_name: a string representing the name of the bucket to create - @context: a nova.auth.api.ApiContext object representing who owns the - bucket. 
- - Raises: - NotAuthorized: if the bucket is already exists or has invalid name - """ - path = os.path.abspath(os.path.join( - FLAGS.buckets_path, bucket_name)) - if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \ - os.path.exists(path): - raise exception.NotAuthorized() - - os.makedirs(path) - - with open(path + '.json', 'w') as f: - json.dump({'ownerId': context.project_id}, f) - - @property - def metadata(self): - """ dictionary of metadata around bucket, - keys are 'Name' and 'CreationDate' - """ - - return { - "Name": self.name, - "CreationDate": datetime.datetime.utcfromtimestamp(self.ctime), - } - - @property - def owner_id(self): - try: - with open(self.path + '.json') as f: - return json.load(f)['ownerId'] - except: - return None - - def is_authorized(self, context): - try: - return context.is_admin or \ - self.owner_id == context.project_id - except Exception, e: - return False - - def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False): - object_names = [] - path_length = len(self.path) - for root, dirs, files in os.walk(self.path): - for file_name in files: - object_name = os.path.join(root, file_name)[path_length + 1:] - object_names.append(object_name) - object_names.sort() - contents = [] - - start_pos = 0 - if marker: - start_pos = bisect.bisect_right(object_names, marker, start_pos) - if prefix: - start_pos = bisect.bisect_left(object_names, prefix, start_pos) - - truncated = False - for object_name in object_names[start_pos:]: - if not object_name.startswith(prefix): - break - if len(contents) >= max_keys: - truncated = True - break - object_path = self._object_path(object_name) - c = {"Key": object_name} - if not terse: - info = os.stat(object_path) - c.update({ - "LastModified": datetime.datetime.utcfromtimestamp( - info.st_mtime), - "Size": info.st_size, - }) - contents.append(c) - marker = object_name - - return { - "Name": self.name, - "Prefix": prefix, - "Marker": marker, - "MaxKeys": max_keys, - "IsTruncated": 
truncated, - "Contents": contents, - } - - def _object_path(self, object_name): - fn = os.path.join(self.path, object_name) - - if not fn.startswith(self.path): - raise exception.NotAuthorized() - - return fn - - def delete(self): - if len(os.listdir(self.path)) > 0: - raise exception.NotEmpty() - os.rmdir(self.path) - os.remove(self.path + '.json') - - def __getitem__(self, key): - return stored.Object(self, key) - - def __setitem__(self, key, value): - with open(self._object_path(key), 'wb') as f: - f.write(value) - - def __delitem__(self, key): - stored.Object(self, key).delete() diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py deleted file mode 100644 index 554c72848..000000000 --- a/nova/objectstore/handler.py +++ /dev/null @@ -1,478 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# -# Copyright 2010 OpenStack LLC. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Implementation of an S3-like storage server based on local files. - -Useful to test features that will eventually run on S3, or if you want to -run something locally that was once running on S3. - -We don't support all the features of S3, but it does work with the -standard S3 client for the most basic semantics. 
To use the standard -S3 client with this module:: - - c = S3.AWSAuthConnection("", "", server="localhost", port=8888, - is_secure=False) - c.create_bucket("mybucket") - c.put("mybucket", "mykey", "a value") - print c.get("mybucket", "mykey").body - -""" - -import datetime -import json -import multiprocessing -import os -import urllib - -from twisted.application import internet -from twisted.application import service -from twisted.web import error -from twisted.web import resource -from twisted.web import server -from twisted.web import static - -from nova import context -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova.auth import manager -from nova.objectstore import bucket -from nova.objectstore import image - - -LOG = logging.getLogger('nova.objectstore.handler') -FLAGS = flags.FLAGS -flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.') - - -def render_xml(request, value): - """Writes value as XML string to request""" - assert isinstance(value, dict) and len(value) == 1 - request.setHeader("Content-Type", "application/xml; charset=UTF-8") - - name = value.keys()[0] - request.write('<?xml version="1.0" encoding="UTF-8"?>\n') - request.write('<' + utils.utf8(name) + - ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') - _render_parts(value.values()[0], request.write) - request.write('</' + utils.utf8(name) + '>') - request.finish() - - -def finish(request, content=None): - """Finalizer method for request""" - if content: - request.write(content) - request.finish() - - -def _render_parts(value, write_cb): - """Helper method to render different Python objects to XML""" - if isinstance(value, basestring): - write_cb(utils.xhtml_escape(value)) - elif isinstance(value, int) or isinstance(value, long): - write_cb(str(value)) - elif isinstance(value, datetime.datetime): - write_cb(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) - elif isinstance(value, dict): - for name, subvalue in 
value.iteritems(): - if not isinstance(subvalue, list): - subvalue = [subvalue] - for subsubvalue in subvalue: - write_cb('<' + utils.utf8(name) + '>') - _render_parts(subsubvalue, write_cb) - write_cb('</' + utils.utf8(name) + '>') - else: - raise Exception(_("Unknown S3 value type %r"), value) - - -def get_argument(request, key, default_value): - """Returns the request's value at key, or default_value - if not found - """ - if key in request.args: - return request.args[key][0] - return default_value - - -def get_context(request): - """Returns the supplied request's context object""" - try: - # Authorization Header format: 'AWS <access>:<secret>' - authorization_header = request.getHeader('Authorization') - if not authorization_header: - raise exception.NotAuthorized() - auth_header_value = authorization_header.split(' ')[1] - access, _ignored, secret = auth_header_value.rpartition(':') - am = manager.AuthManager() - (user, project) = am.authenticate(access, - secret, - {}, - request.method, - request.getRequestHostname(), - request.uri, - headers=request.getAllHeaders(), - check_type='s3') - rv = context.RequestContext(user, project) - LOG.audit(_("Authenticated request"), context=rv) - return rv - except exception.Error as ex: - LOG.debug(_("Authentication Failure: %s"), ex) - raise exception.NotAuthorized() - - -class ErrorHandlingResource(resource.Resource): - """Maps exceptions to 404 / 401 codes. Won't work for - exceptions thrown after NOT_DONE_YET is returned. - """ - # TODO(unassigned) (calling-all-twisted-experts): This needs to be - # plugged in to the right place in twisted... 
- # This doesn't look like it's the right place - # (consider exceptions in getChild; or after - # NOT_DONE_YET is returned - def render(self, request): - """Renders the response as XML""" - try: - return resource.Resource.render(self, request) - except exception.NotFound: - request.setResponseCode(404) - return '' - except exception.NotAuthorized: - request.setResponseCode(403) - return '' - - -class S3(ErrorHandlingResource): - """Implementation of an S3-like storage server based on local files.""" - def __init__(self): - ErrorHandlingResource.__init__(self) - - def getChild(self, name, request): # pylint: disable=C0103 - """Returns either the image or bucket resource""" - request.context = get_context(request) - if name == '': - return self - elif name == '_images': - return ImagesResource() - else: - return BucketResource(name) - - def render_GET(self, request): # pylint: disable=R0201 - """Renders the GET request for a list of buckets as XML""" - LOG.debug(_('List of buckets requested'), context=request.context) - buckets = [b for b in bucket.Bucket.all() - if b.is_authorized(request.context)] - - render_xml(request, {"ListAllMyBucketsResult": { - "Buckets": {"Bucket": [b.metadata for b in buckets]}, - }}) - return server.NOT_DONE_YET - - -class BucketResource(ErrorHandlingResource): - """A web resource containing an S3-like bucket""" - def __init__(self, name): - resource.Resource.__init__(self) - self.name = name - - def getChild(self, name, request): - """Returns the bucket resource itself, or the object resource - the bucket contains if a name is supplied - """ - if name == '': - return self - else: - return ObjectResource(bucket.Bucket(self.name), name) - - def render_GET(self, request): - "Returns the keys for the bucket resource""" - LOG.debug(_("List keys for bucket %s"), self.name) - - try: - bucket_object = bucket.Bucket(self.name) - except exception.NotFound: - return error.NoResource(message="No such bucket").render(request) - - if not 
bucket_object.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to access bucket %s"), - self.name, context=request.context) - raise exception.NotAuthorized() - - prefix = get_argument(request, "prefix", u"") - marker = get_argument(request, "marker", u"") - max_keys = int(get_argument(request, "max-keys", 1000)) - terse = int(get_argument(request, "terse", 0)) - - results = bucket_object.list_keys(prefix=prefix, - marker=marker, - max_keys=max_keys, - terse=terse) - render_xml(request, {"ListBucketResult": results}) - return server.NOT_DONE_YET - - def render_PUT(self, request): - "Creates the bucket resource""" - LOG.debug(_("Creating bucket %s"), self.name) - LOG.debug("calling bucket.Bucket.create(%r, %r)", - self.name, - request.context) - bucket.Bucket.create(self.name, request.context) - request.finish() - return server.NOT_DONE_YET - - def render_DELETE(self, request): - """Deletes the bucket resource""" - LOG.debug(_("Deleting bucket %s"), self.name) - bucket_object = bucket.Bucket(self.name) - - if not bucket_object.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to delete bucket %s"), - self.name, context=request.context) - raise exception.NotAuthorized() - - bucket_object.delete() - request.setResponseCode(204) - return '' - - -class ObjectResource(ErrorHandlingResource): - """The resource returned from a bucket""" - def __init__(self, bucket, name): - resource.Resource.__init__(self) - self.bucket = bucket - self.name = name - - def render_GET(self, request): - """Returns the object - - Raises NotAuthorized if user in request context is not - authorized to delete the object. 
- """ - bname = self.bucket.name - nm = self.name - LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals()) - - if not self.bucket.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to get object %(nm)s" - " from bucket %(bname)s") % locals(), - context=request.context) - raise exception.NotAuthorized() - - obj = self.bucket[urllib.unquote(self.name)] - request.setHeader("Content-Type", "application/unknown") - request.setHeader("Last-Modified", - datetime.datetime.utcfromtimestamp(obj.mtime)) - request.setHeader("Etag", '"' + obj.md5 + '"') - return static.File(obj.path).render_GET(request) - - def render_PUT(self, request): - """Modifies/inserts the object and returns a result code - - Raises NotAuthorized if user in request context is not - authorized to delete the object. - """ - nm = self.name - bname = self.bucket.name - LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals()) - - if not self.bucket.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to upload object %(nm)s to" - " bucket %(bname)s") % locals(), context=request.context) - raise exception.NotAuthorized() - - key = urllib.unquote(self.name) - request.content.seek(0, 0) - self.bucket[key] = request.content.read() - request.setHeader("Etag", '"' + self.bucket[key].md5 + '"') - finish(request) - return server.NOT_DONE_YET - - def render_DELETE(self, request): - """Deletes the object and returns a result code - - Raises NotAuthorized if user in request context is not - authorized to delete the object. 
- """ - nm = self.name - bname = self.bucket.name - LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(), - context=request.context) - - if not self.bucket.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to delete object %(nm)s from " - "bucket %(bname)s") % locals(), context=request.context) - raise exception.NotAuthorized() - - del self.bucket[urllib.unquote(self.name)] - request.setResponseCode(204) - return '' - - -class ImageResource(ErrorHandlingResource): - """A web resource representing a single image""" - isLeaf = True - - def __init__(self, name): - resource.Resource.__init__(self) - self.img = image.Image(name) - - def render_GET(self, request): - """Returns the image file""" - if not self.img.is_authorized(request.context, True): - raise exception.NotAuthorized() - return static.File(self.img.image_path, - defaultType='application/octet-stream').\ - render_GET(request) - - -class ImagesResource(resource.Resource): - """A web resource representing a list of images""" - - def getChild(self, name, _request): - """Returns itself or an ImageResource if no name given""" - if name == '': - return self - else: - return ImageResource(name) - - def render_GET(self, request): # pylint: disable=R0201 - """ returns a json listing of all images - that a user has permissions to see """ - - images = [i for i in image.Image.all() \ - if i.is_authorized(request.context, readonly=True)] - - # Bug #617776: - # We used to have 'type' in the image metadata, but this field - # should be called 'imageType', as per the EC2 specification. - # For compat with old metadata files we copy type to imageType if - # imageType is not present. - # For compat with euca2ools (and any other clients using the - # incorrect name) we copy imageType to type. - # imageType is primary if we end up with both in the metadata file - # (which should never happen). 
- def decorate(m): - if 'imageType' not in m and 'type' in m: - m[u'imageType'] = m['type'] - elif 'imageType' in m: - m[u'type'] = m['imageType'] - if 'displayName' not in m: - m[u'displayName'] = u'' - return m - - request.write(json.dumps([decorate(i.metadata) for i in images])) - request.finish() - return server.NOT_DONE_YET - - def render_PUT(self, request): # pylint: disable=R0201 - """ create a new registered image """ - - image_id = get_argument(request, 'image_id', u'') - image_location = get_argument(request, 'image_location', u'') - - image_path = os.path.join(FLAGS.images_path, image_id) - if ((not image_path.startswith(FLAGS.images_path)) or - os.path.exists(image_path)): - LOG.audit(_("Not authorized to upload image: invalid directory " - "%s"), - image_path, context=request.context) - raise exception.NotAuthorized() - - bucket_object = bucket.Bucket(image_location.split("/")[0]) - - if not bucket_object.is_authorized(request.context): - LOG.audit(_("Not authorized to upload image: unauthorized " - "bucket %s"), bucket_object.name, - context=request.context) - raise exception.NotAuthorized() - - LOG.audit(_("Starting image upload: %s"), image_id, - context=request.context) - p = multiprocessing.Process(target=image.Image.register_aws_image, - args=(image_id, image_location, request.context)) - p.start() - return '' - - def render_POST(self, request): # pylint: disable=R0201 - """Update image attributes: public/private""" - - # image_id required for all requests - image_id = get_argument(request, 'image_id', u'') - image_object = image.Image(image_id) - if not image_object.is_authorized(request.context): - LOG.audit(_("Not authorized to update attributes of image %s"), - image_id, context=request.context) - raise exception.NotAuthorized() - - operation = get_argument(request, 'operation', u'') - if operation: - # operation implies publicity toggle - newstatus = (operation == 'add') - LOG.audit(_("Toggling publicity flag of image %(image_id)s" - " 
%(newstatus)r") % locals(), context=request.context) - image_object.set_public(newstatus) - else: - # other attributes imply update - LOG.audit(_("Updating user fields on image %s"), image_id, - context=request.context) - clean_args = {} - for arg in request.args.keys(): - clean_args[arg] = request.args[arg][0] - image_object.update_user_editable_fields(clean_args) - return '' - - def render_DELETE(self, request): # pylint: disable=R0201 - """Delete a registered image""" - image_id = get_argument(request, "image_id", u"") - image_object = image.Image(image_id) - - if not image_object.is_authorized(request.context): - LOG.audit(_("Unauthorized attempt to delete image %s"), - image_id, context=request.context) - raise exception.NotAuthorized() - - image_object.delete() - LOG.audit(_("Deleted image: %s"), image_id, context=request.context) - - request.setResponseCode(204) - return '' - - -def get_site(): - """Support for WSGI-like interfaces""" - root = S3() - site = server.Site(root) - return site - - -def get_application(): - """Support WSGI-like interfaces""" - factory = get_site() - application = service.Application("objectstore") - # Disabled because of lack of proper introspection in Twisted - # or possibly different versions of twisted? - # pylint: disable=E1101 - objectStoreService = internet.TCPServer(FLAGS.s3_port, factory, - interface=FLAGS.s3_listen_host) - objectStoreService.setServiceParent(application) - return application diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py deleted file mode 100644 index c90b5b54b..000000000 --- a/nova/objectstore/image.py +++ /dev/null @@ -1,296 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Take uploaded bucket contents and register them as disk images (AMIs). -Requires decryption using keys in the manifest. -""" - - -import binascii -import glob -import json -import os -import shutil -import tarfile -from xml.etree import ElementTree - -from nova import exception -from nova import flags -from nova import utils -from nova.objectstore import bucket - - -FLAGS = flags.FLAGS -flags.DECLARE('images_path', 'nova.image.local') - - -class Image(object): - def __init__(self, image_id): - self.image_id = image_id - self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id)) - if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \ - not os.path.isdir(self.path): - raise exception.NotFound - - @property - def image_path(self): - return os.path.join(self.path, 'image') - - def delete(self): - for fn in ['info.json', 'image']: - try: - os.unlink(os.path.join(self.path, fn)) - except: - pass - try: - os.rmdir(self.path) - except: - pass - - def is_authorized(self, context, readonly=False): - # NOTE(devcamcar): Public images can be read by anyone, - # but only modified by admin or owner. 
- try: - return (self.metadata['isPublic'] and readonly) or \ - context.is_admin or \ - self.metadata['imageOwnerId'] == context.project_id - except: - return False - - def set_public(self, state): - md = self.metadata - md['isPublic'] = state - with open(os.path.join(self.path, 'info.json'), 'w') as f: - json.dump(md, f) - - def update_user_editable_fields(self, args): - """args is from the request parameters, so requires extra cleaning""" - fields = {'display_name': 'displayName', 'description': 'description'} - info = self.metadata - for field in fields.keys(): - if field in args: - info[fields[field]] = args[field] - with open(os.path.join(self.path, 'info.json'), 'w') as f: - json.dump(info, f) - - @staticmethod - def all(): - images = [] - for fn in glob.glob("%s/*/info.json" % FLAGS.images_path): - try: - image_id = fn.split('/')[-2] - images.append(Image(image_id)) - except: - pass - return images - - @property - def owner_id(self): - return self.metadata['imageOwnerId'] - - @property - def metadata(self): - with open(os.path.join(self.path, 'info.json')) as f: - return json.load(f) - - @staticmethod - def add(src, description, kernel=None, ramdisk=None, public=True): - """adds an image to imagestore - - @type src: str - @param src: location of the partition image on disk - - @type description: str - @param description: string describing the image contents - - @type kernel: bool or str - @param kernel: either TRUE meaning this partition is a kernel image or - a string of the image id for the kernel - - @type ramdisk: bool or str - @param ramdisk: either TRUE meaning this partition is a ramdisk image - or a string of the image id for the ramdisk - - - @type public: bool - @param public: determine if this is a public image or private - - @rtype: str - @return: a string with the image id - """ - - image_type = 'machine' - image_id = utils.generate_uid('ami') - - if kernel is True: - image_type = 'kernel' - image_id = utils.generate_uid('aki') - if ramdisk is 
True: - image_type = 'ramdisk' - image_id = utils.generate_uid('ari') - - image_path = os.path.join(FLAGS.images_path, image_id) - os.makedirs(image_path) - - shutil.copyfile(src, os.path.join(image_path, 'image')) - - info = { - 'imageId': image_id, - 'imageLocation': description, - 'imageOwnerId': 'system', - 'isPublic': public, - 'architecture': 'x86_64', - 'imageType': image_type, - 'state': 'available'} - - if type(kernel) is str and len(kernel) > 0: - info['kernelId'] = kernel - - if type(ramdisk) is str and len(ramdisk) > 0: - info['ramdiskId'] = ramdisk - - with open(os.path.join(image_path, 'info.json'), "w") as f: - json.dump(info, f) - - return image_id - - @staticmethod - def register_aws_image(image_id, image_location, context): - image_path = os.path.join(FLAGS.images_path, image_id) - os.makedirs(image_path) - - bucket_name = image_location.split("/")[0] - manifest_path = image_location[len(bucket_name) + 1:] - bucket_object = bucket.Bucket(bucket_name) - - manifest = ElementTree.fromstring(bucket_object[manifest_path].read()) - image_type = 'machine' - - try: - kernel_id = manifest.find("machine_configuration/kernel_id").text - if kernel_id == 'true': - image_type = 'kernel' - except: - kernel_id = None - - try: - ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text - if ramdisk_id == 'true': - image_type = 'ramdisk' - except: - ramdisk_id = None - - try: - arch = manifest.find("machine_configuration/architecture").text - except: - arch = 'x86_64' - - info = { - 'imageId': image_id, - 'imageLocation': image_location, - 'imageOwnerId': context.project_id, - 'isPublic': False, # FIXME: grab public from manifest - 'architecture': arch, - 'imageType': image_type} - - if kernel_id: - info['kernelId'] = kernel_id - - if ramdisk_id: - info['ramdiskId'] = ramdisk_id - - def write_state(state): - info['imageState'] = state - with open(os.path.join(image_path, 'info.json'), "w") as f: - json.dump(info, f) - - write_state('pending') - - 
encrypted_filename = os.path.join(image_path, 'image.encrypted') - with open(encrypted_filename, 'w') as f: - for filename in manifest.find("image").getiterator("filename"): - shutil.copyfileobj(bucket_object[filename.text].file, f) - - write_state('decrypting') - - # FIXME: grab kernelId and ramdiskId from bundle manifest - hex_key = manifest.find("image/ec2_encrypted_key").text - encrypted_key = binascii.a2b_hex(hex_key) - hex_iv = manifest.find("image/ec2_encrypted_iv").text - encrypted_iv = binascii.a2b_hex(hex_iv) - cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem") - - decrypted_filename = os.path.join(image_path, 'image.tar.gz') - Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, - cloud_private_key, decrypted_filename) - - write_state('untarring') - - image_file = Image.untarzip_image(image_path, decrypted_filename) - shutil.move(os.path.join(image_path, image_file), - os.path.join(image_path, 'image')) - - write_state('available') - os.unlink(decrypted_filename) - os.unlink(encrypted_filename) - - @staticmethod - def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv, - cloud_private_key, decrypted_filename): - key, err = utils.execute('openssl', - 'rsautl', - '-decrypt', - '-inkey', '%s' % cloud_private_key, - process_input=encrypted_key, - check_exit_code=False) - if err: - raise exception.Error(_("Failed to decrypt private key: %s") - % err) - iv, err = utils.execute('openssl', - 'rsautl', - '-decrypt', - '-inkey', '%s' % cloud_private_key, - process_input=encrypted_iv, - check_exit_code=False) - if err: - raise exception.Error(_("Failed to decrypt initialization " - "vector: %s") % err) - - _out, err = utils.execute('openssl', - 'enc', - '-d', - '-aes-128-cbc', - '-in', '%s' % (encrypted_filename,), - '-K', '%s' % (key,), - '-iv', '%s' % (iv,), - '-out', '%s' % (decrypted_filename,), - check_exit_code=False) - if err: - raise exception.Error(_("Failed to decrypt image file " - "%(image_file)s: %(err)s") % 
- {'image_file': encrypted_filename, - 'err': err}) - - @staticmethod - def untarzip_image(path, filename): - tar_file = tarfile.open(filename, "r|gz") - tar_file.extractall(path) - image_file = tar_file.getnames()[0] - tar_file.close() - return image_file diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py new file mode 100644 index 000000000..dd6327c8f --- /dev/null +++ b/nova/objectstore/s3server.py @@ -0,0 +1,335 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack LLC. +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of an S3-like storage server based on local files. + +Useful to test features that will eventually run on S3, or if you want to +run something locally that was once running on S3. + +We don't support all the features of S3, but it does work with the +standard S3 client for the most basic semantics. 
To use the standard +S3 client with this module: + + c = S3.AWSAuthConnection("", "", server="localhost", port=8888, + is_secure=False) + c.create_bucket("mybucket") + c.put("mybucket", "mykey", "a value") + print c.get("mybucket", "mykey").body + +""" + +import bisect +import datetime +import hashlib +import os +import os.path +import urllib + +import routes +import webob + +from nova import flags +from nova import log as logging +from nova import utils +from nova import wsgi + + +FLAGS = flags.FLAGS +flags.DEFINE_string('buckets_path', '$state_path/buckets', + 'path to s3 buckets') + + +class S3Application(wsgi.Router): + """Implementation of an S3-like storage server based on local files. + + If bucket depth is given, we break files up into multiple directories + to prevent hitting file system limits for number of files in each + directories. 1 means one level of directories, 2 means 2, etc. + + """ + + def __init__(self, root_directory, bucket_depth=0, mapper=None): + if mapper is None: + mapper = routes.Mapper() + + mapper.connect('/', + controller=lambda *a, **kw: RootHandler(self)(*a, **kw)) + mapper.connect('/{bucket}/{object_name}', + controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw)) + mapper.connect('/{bucket_name}/', + controller=lambda *a, **kw: BucketHandler(self)(*a, **kw)) + self.directory = os.path.abspath(root_directory) + if not os.path.exists(self.directory): + os.makedirs(self.directory) + self.bucket_depth = bucket_depth + super(S3Application, self).__init__(mapper) + + +class BaseRequestHandler(wsgi.Controller): + """Base class emulating Tornado's web framework pattern in WSGI. + + This is a direct port of Tornado's implementation, so some key decisions + about how the code interacts have already been chosen. + + The two most common ways of designing web frameworks can be + classified as async object-oriented and sync functional. 
+ + Tornado's is on the OO side because a response is built up in and using + the shared state of an object and one of the object's methods will + eventually trigger the "finishing" of the response asynchronously. + + Most WSGI stuff is in the functional side, we pass a request object to + every call down a chain and the eventual return value will be a response. + + Part of the function of the routing code in S3Application as well as the + code in BaseRequestHandler's __call__ method is to merge those two styles + together enough that the Tornado code can work without extensive + modifications. + + To do that it needs to give the Tornado-style code clean objects that it + can modify the state of for each request that is processed, so we use a + very simple factory lambda to create new state for each request, that's + the stuff in the router, and when we let the Tornado code modify that + object to handle the request, then we return the response it generated. + This wouldn't work the same if Tornado was being more async'y and doing + other callbacks throughout the process, but since Tornado is being + relatively simple here we can be satisfied that the response will be + complete by the end of the get/post method. 
+ + """ + + def __init__(self, application): + self.application = application + + @webob.dec.wsgify + def __call__(self, request): + method = request.method.lower() + f = getattr(self, method, self.invalid) + self.request = request + self.response = webob.Response() + params = request.environ['wsgiorg.routing_args'][1] + del params['controller'] + f(**params) + return self.response + + def get_argument(self, arg, default): + return self.request.str_params.get(arg, default) + + def set_header(self, header, value): + self.response.headers[header] = value + + def set_status(self, status_code): + self.response.status = status_code + + def finish(self, body=''): + self.response.body = utils.utf8(body) + + def invalid(self, **kwargs): + pass + + def render_xml(self, value): + assert isinstance(value, dict) and len(value) == 1 + self.set_header("Content-Type", "application/xml; charset=UTF-8") + name = value.keys()[0] + parts = [] + parts.append('<' + utils.utf8(name) + + ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">') + self._render_parts(value.values()[0], parts) + parts.append('</' + utils.utf8(name) + '>') + self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' + + ''.join(parts)) + + def _render_parts(self, value, parts=[]): + if isinstance(value, basestring): + parts.append(utils.xhtml_escape(value)) + elif isinstance(value, int) or isinstance(value, long): + parts.append(str(value)) + elif isinstance(value, datetime.datetime): + parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z")) + elif isinstance(value, dict): + for name, subvalue in value.iteritems(): + if not isinstance(subvalue, list): + subvalue = [subvalue] + for subsubvalue in subvalue: + parts.append('<' + utils.utf8(name) + '>') + self._render_parts(subsubvalue, parts) + parts.append('</' + utils.utf8(name) + '>') + else: + raise Exception("Unknown S3 value type %r", value) + + def _object_path(self, bucket, object_name): + if self.application.bucket_depth < 1: + return 
os.path.abspath(os.path.join( + self.application.directory, bucket, object_name)) + hash = hashlib.md5(object_name).hexdigest() + path = os.path.abspath(os.path.join( + self.application.directory, bucket)) + for i in range(self.application.bucket_depth): + path = os.path.join(path, hash[:2 * (i + 1)]) + return os.path.join(path, object_name) + + +class RootHandler(BaseRequestHandler): + def get(self): + names = os.listdir(self.application.directory) + buckets = [] + for name in names: + path = os.path.join(self.application.directory, name) + info = os.stat(path) + buckets.append({ + "Name": name, + "CreationDate": datetime.datetime.utcfromtimestamp( + info.st_ctime), + }) + self.render_xml({"ListAllMyBucketsResult": { + "Buckets": {"Bucket": buckets}, + }}) + + +class BucketHandler(BaseRequestHandler): + def get(self, bucket_name): + prefix = self.get_argument("prefix", u"") + marker = self.get_argument("marker", u"") + max_keys = int(self.get_argument("max-keys", 50000)) + path = os.path.abspath(os.path.join(self.application.directory, + bucket_name)) + terse = int(self.get_argument("terse", 0)) + if not path.startswith(self.application.directory) or \ + not os.path.isdir(path): + self.set_status(404) + return + object_names = [] + for root, dirs, files in os.walk(path): + for file_name in files: + object_names.append(os.path.join(root, file_name)) + skip = len(path) + 1 + for i in range(self.application.bucket_depth): + skip += 2 * (i + 1) + 1 + object_names = [n[skip:] for n in object_names] + object_names.sort() + contents = [] + + start_pos = 0 + if marker: + start_pos = bisect.bisect_right(object_names, marker, start_pos) + if prefix: + start_pos = bisect.bisect_left(object_names, prefix, start_pos) + + truncated = False + for object_name in object_names[start_pos:]: + if not object_name.startswith(prefix): + break + if len(contents) >= max_keys: + truncated = True + break + object_path = self._object_path(bucket_name, object_name) + c = {"Key": object_name} 
+ if not terse: + info = os.stat(object_path) + c.update({ + "LastModified": datetime.datetime.utcfromtimestamp( + info.st_mtime), + "Size": info.st_size, + }) + contents.append(c) + marker = object_name + self.render_xml({"ListBucketResult": { + "Name": bucket_name, + "Prefix": prefix, + "Marker": marker, + "MaxKeys": max_keys, + "IsTruncated": truncated, + "Contents": contents, + }}) + + def put(self, bucket_name): + path = os.path.abspath(os.path.join( + self.application.directory, bucket_name)) + if not path.startswith(self.application.directory) or \ + os.path.exists(path): + self.set_status(403) + return + os.makedirs(path) + self.finish() + + def delete(self, bucket_name): + path = os.path.abspath(os.path.join( + self.application.directory, bucket_name)) + if not path.startswith(self.application.directory) or \ + not os.path.isdir(path): + self.set_status(404) + return + if len(os.listdir(path)) > 0: + self.set_status(403) + return + os.rmdir(path) + self.set_status(204) + self.finish() + + +class ObjectHandler(BaseRequestHandler): + def get(self, bucket, object_name): + object_name = urllib.unquote(object_name) + path = self._object_path(bucket, object_name) + if not path.startswith(self.application.directory) or \ + not os.path.isfile(path): + self.set_status(404) + return + info = os.stat(path) + self.set_header("Content-Type", "application/unknown") + self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp( + info.st_mtime)) + object_file = open(path, "r") + try: + self.finish(object_file.read()) + finally: + object_file.close() + + def put(self, bucket, object_name): + object_name = urllib.unquote(object_name) + bucket_dir = os.path.abspath(os.path.join( + self.application.directory, bucket)) + if not bucket_dir.startswith(self.application.directory) or \ + not os.path.isdir(bucket_dir): + self.set_status(404) + return + path = self._object_path(bucket, object_name) + if not path.startswith(bucket_dir) or os.path.isdir(path): + 
self.set_status(403) + return + directory = os.path.dirname(path) + if not os.path.exists(directory): + os.makedirs(directory) + object_file = open(path, "w") + object_file.write(self.request.body) + object_file.close() + self.set_header('ETag', + '"%s"' % hashlib.md5(self.request.body).hexdigest()) + self.finish() + + def delete(self, bucket, object_name): + object_name = urllib.unquote(object_name) + path = self._object_path(bucket, object_name) + if not path.startswith(self.application.directory) or \ + not os.path.isfile(path): + self.set_status(404) + return + os.unlink(path) + self.set_status(204) + self.finish() diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py deleted file mode 100644 index a3f6e9c0b..000000000 --- a/nova/objectstore/stored.py +++ /dev/null @@ -1,63 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Properties of an object stored within a bucket. 
-""" - -import os - -import nova.crypto -from nova import exception - - -class Object(object): - def __init__(self, bucket, key): - """ wrapper class of an existing key """ - self.bucket = bucket - self.key = key - self.path = bucket._object_path(key) - if not os.path.isfile(self.path): - raise exception.NotFound - - def __repr__(self): - return "<Object %s/%s>" % (self.bucket, self.key) - - @property - def md5(self): - """ computes the MD5 of the contents of file """ - with open(self.path, "r") as f: - return nova.crypto.compute_md5(f) - - @property - def mtime(self): - """ mtime of file """ - return os.path.getmtime(self.path) - - def read(self): - """ read all contents of key into memory and return """ - return self.file.read() - - @property - def file(self): - """ return a file object for the key """ - return open(self.path, 'rb') - - def delete(self): - """ deletes the file """ - os.unlink(self.path) diff --git a/nova/rpc.py b/nova/rpc.py index 5935e1fb3..388f78d69 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -137,24 +137,7 @@ class Consumer(messaging.Consumer): return timer -class Publisher(messaging.Publisher): - """Publisher base class""" - pass - - -class TopicConsumer(Consumer): - """Consumes messages on a specific topic""" - exchange_type = "topic" - - def __init__(self, connection=None, topic="broadcast"): - self.queue = topic - self.routing_key = topic - self.exchange = FLAGS.control_exchange - self.durable = False - super(TopicConsumer, self).__init__(connection=connection) - - -class AdapterConsumer(TopicConsumer): +class AdapterConsumer(Consumer): """Calls methods on a proxy object based on method and args""" def __init__(self, connection=None, topic="broadcast", proxy=None): LOG.debug(_('Initing the Adapter Consumer for %s') % topic) @@ -207,6 +190,41 @@ class AdapterConsumer(TopicConsumer): return +class Publisher(messaging.Publisher): + """Publisher base class""" + pass + + +class TopicAdapterConsumer(AdapterConsumer): + """Consumes messages on 
a specific topic""" + exchange_type = "topic" + + def __init__(self, connection=None, topic="broadcast", proxy=None): + self.queue = topic + self.routing_key = topic + self.exchange = FLAGS.control_exchange + self.durable = False + super(TopicAdapterConsumer, self).__init__(connection=connection, + topic=topic, proxy=proxy) + + +class FanoutAdapterConsumer(AdapterConsumer): + """Consumes messages from a fanout exchange""" + exchange_type = "fanout" + + def __init__(self, connection=None, topic="broadcast", proxy=None): + self.exchange = "%s_fanout" % topic + self.routing_key = topic + unique = uuid.uuid4().hex + self.queue = "%s_fanout_%s" % (topic, unique) + self.durable = False + LOG.info(_("Created '%(exchange)s' fanout exchange " + "with '%(key)s' routing key"), + dict(exchange=self.exchange, key=self.routing_key)) + super(FanoutAdapterConsumer, self).__init__(connection=connection, + topic=topic, proxy=proxy) + + class TopicPublisher(Publisher): """Publishes messages on a specific topic""" exchange_type = "topic" @@ -218,6 +236,19 @@ class TopicPublisher(Publisher): super(TopicPublisher, self).__init__(connection=connection) +class FanoutPublisher(Publisher): + """Publishes messages to a fanout exchange.""" + exchange_type = "fanout" + + def __init__(self, topic, connection=None): + self.exchange = "%s_fanout" % topic + self.queue = "%s_fanout" % topic + self.durable = False + LOG.info(_("Creating '%(exchange)s' fanout exchange"), + dict(exchange=self.exchange)) + super(FanoutPublisher, self).__init__(connection=connection) + + class DirectConsumer(Consumer): """Consumes messages directly on a channel specified by msg_id""" exchange_type = "direct" @@ -360,6 +391,16 @@ def cast(context, topic, msg): publisher.close() +def fanout_cast(context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response""" + LOG.debug(_("Making asynchronous fanout cast...")) + _pack_context(msg, context) + conn = Connection.instance() + publisher = 
FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() + + def generic_response(message_data, message): """Logs a result and exits""" LOG.debug(_('response %s'), message_data) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 2405f1343..6bb3bf3cd 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -17,33 +17,225 @@ Handles all requests relating to schedulers. """ +import novaclient + +from nova import db +from nova import exception from nova import flags from nova import log as logging from nova import rpc +from eventlet import greenpool + FLAGS = flags.FLAGS +flags.DEFINE_bool('enable_zone_routing', + False, + 'When True, routing to child zones will occur.') + LOG = logging.getLogger('nova.scheduler.api') -class API(object): - """API for interacting with the scheduler.""" +def _call_scheduler(method, context, params=None): + """Generic handler for RPC calls to the scheduler. + + :param params: Optional dictionary of arguments to be passed to the + scheduler worker + + :retval: Result returned by scheduler worker + """ + if not params: + params = {} + queue = FLAGS.scheduler_topic + kwargs = {'method': method, 'args': params} + return rpc.call(context, queue, kwargs) + + +def get_zone_list(context): + """Return a list of zones assoicated with this zone.""" + items = _call_scheduler('get_zone_list', context) + for item in items: + item['api_url'] = item['api_url'].replace('\\/', '/') + if not items: + items = db.zone_get_all(context) + return items + + +def zone_get(context, zone_id): + return db.zone_get(context, zone_id) + + +def zone_delete(context, zone_id): + return db.zone_delete(context, zone_id) + + +def zone_create(context, data): + return db.zone_create(context, data) + + +def zone_update(context, zone_id, data): + return db.zone_update(context, zone_id, data) + + +def get_zone_capabilities(context, service=None): + """Returns a dict of key, value capabilities for this zone, + or for a particular 
class of services running in this zone.""" + return _call_scheduler('get_zone_capabilities', context=context, + params=dict(service=service)) + + +def update_service_capabilities(context, service_name, host, capabilities): + """Send an update to all the scheduler services informing them + of the capabilities of this service.""" + kwargs = dict(method='update_service_capabilities', + args=dict(service_name=service_name, host=host, + capabilities=capabilities)) + return rpc.fanout_cast(context, 'scheduler', kwargs) + + +def _wrap_method(function, self): + """Wrap method to supply self.""" + def _wrap(*args, **kwargs): + return function(self, *args, **kwargs) + return _wrap + + +def _process(func, zone): + """Worker stub for green thread pool. Give the worker + an authenticated nova client and zone info.""" + nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url) + nova.authenticate() + return func(nova, zone) + + +def child_zone_helper(zone_list, func): + """Fire off a command to each zone in the list. + The return is [novaclient return objects] from each child zone. + For example, if you are calling server.pause(), the list will + be whatever the response from server.pause() is. One entry + per child zone called.""" + green_pool = greenpool.GreenPool() + return [result for result in green_pool.imap( + _wrap_method(_process, func), zone_list)] + + +def _issue_novaclient_command(nova, zone, collection, method_name, item_id): + """Use novaclient to issue command to a single child zone. 
+ One of these will be run in parallel for each child zone.""" + manager = getattr(nova, collection) + result = None + try: + try: + result = manager.get(int(item_id)) + except ValueError, e: + result = manager.find(name=item_id) + except novaclient.NotFound: + url = zone.api_url + LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" % + locals())) + return None + + if method_name.lower() not in ['get', 'find']: + result = getattr(result, method_name)() + return result + + +def wrap_novaclient_function(f, collection, method_name, item_id): + """Appends collection, method_name and item_id to the incoming + (nova, zone) call from child_zone_helper.""" + def inner(nova, zone): + return f(nova, zone, collection, method_name, item_id) + + return inner + + +class RedirectResult(exception.Error): + """Used to the HTTP API know that these results are pre-cooked + and they can be returned to the caller directly.""" + def __init__(self, results): + self.results = results + super(RedirectResult, self).__init__( + message=_("Uncaught Zone redirection exception")) + + +class reroute_compute(object): + """Decorator used to indicate that the method should + delegate the call the child zones if the db query + can't find anything.""" + def __init__(self, method_name): + self.method_name = method_name + + def __call__(self, f): + def wrapped_f(*args, **kwargs): + collection, context, item_id = \ + self.get_collection_context_and_id(args, kwargs) + try: + # Call the original function ... + return f(*args, **kwargs) + except exception.InstanceNotFound, e: + LOG.debug(_("Instance %(item_id)s not found " + "locally: '%(e)s'" % locals())) + + if not FLAGS.enable_zone_routing: + raise + + zones = db.zone_get_all(context) + if not zones: + raise + + # Ask the children to provide an answer ... 
+ LOG.debug(_("Asking child zones ...")) + result = self._call_child_zones(zones, + wrap_novaclient_function(_issue_novaclient_command, + collection, self.method_name, item_id)) + # Scrub the results and raise another exception + # so the API layers can bail out gracefully ... + raise RedirectResult(self.unmarshall_result(result)) + return wrapped_f + + def _call_child_zones(self, zones, function): + """Ask the child zones to perform this operation. + Broken out for testing.""" + return child_zone_helper(zones, function) + + def get_collection_context_and_id(self, args, kwargs): + """Returns a tuple of (novaclient collection name, security + context and resource id. Derived class should override this.""" + context = kwargs.get('context', None) + instance_id = kwargs.get('instance_id', None) + if len(args) > 0 and not context: + context = args[1] + if len(args) > 1 and not instance_id: + instance_id = args[2] + return ("servers", context, instance_id) + + def unmarshall_result(self, zone_responses): + """Result is a list of responses from each child zone. + Each decorator derivation is responsible to turning this + into a format expected by the calling method. For + example, this one is expected to return a single Server + dict {'server':{k:v}}. Others may return a list of them, like + {'servers':[{k,v}]}""" + reduced_response = [] + for zone_response in zone_responses: + if not zone_response: + continue + + server = zone_response.__dict__ - def _call_scheduler(self, method, context, params=None): - """Generic handler for RPC calls to the scheduler. + for k in server.keys(): + if k[0] == '_' or k == 'manager': + del server[k] - :param params: Optional dictionary of arguments to be passed to the - scheduler worker + reduced_response.append(dict(server=server)) + if reduced_response: + return reduced_response[0] # first for now. 
+ return {} - :retval: Result returned by scheduler worker - """ - if not params: - params = {} - queue = FLAGS.scheduler_topic - kwargs = {'method': method, 'args': params} - return rpc.call(context, queue, kwargs) - def get_zone_list(self, context): - items = self._call_scheduler('get_zone_list', context) - for item in items: - item['api_url'] = item['api_url'].replace('\\/', '/') - return items +def redirect_handler(f): + def new_f(*args, **kwargs): + try: + return f(*args, **kwargs) + except RedirectResult, e: + return e.results + return new_f diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index ed3dfe1c0..ce05d9f6a 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -49,6 +49,13 @@ class WillNotSchedule(exception.Error): class Scheduler(object): """The base class that all Scheduler clases should inherit from.""" + def __init__(self): + self.zone_manager = None + + def set_zone_manager(self, zone_manager): + """Called by the Scheduler Service to supply a ZoneManager.""" + self.zone_manager = zone_manager + @staticmethod def service_is_up(service): """Check whether a service is up based on last heartbeat.""" diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 053a53356..7d62cfc4e 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -41,10 +41,11 @@ flags.DEFINE_string('scheduler_driver', class SchedulerManager(manager.Manager): """Chooses a host to run instances on.""" def __init__(self, scheduler_driver=None, *args, **kwargs): + self.zone_manager = zone_manager.ZoneManager() if not scheduler_driver: scheduler_driver = FLAGS.scheduler_driver self.driver = utils.import_object(scheduler_driver) - self.zone_manager = zone_manager.ZoneManager() + self.driver.set_zone_manager(self.zone_manager) super(SchedulerManager, self).__init__(*args, **kwargs) def __getattr__(self, key): @@ -59,6 +60,17 @@ class SchedulerManager(manager.Manager): """Get a list of zones from the ZoneManager.""" 
return self.zone_manager.get_zone_list() + def get_zone_capabilities(self, context=None, service=None): + """Get the normalized set of capabilites for this zone, + or for a particular service.""" + return self.zone_manager.get_zone_capabilities(context, service) + + def update_service_capabilities(self, context=None, service_name=None, + host=None, capabilities={}): + """Process a capability update from a service node.""" + self.zone_manager.update_service_capabilities(service_name, + host, capabilities) + def _schedule(self, method, context, topic, *args, **kwargs): """Tries to call schedule_* method on the driver to retrieve host. diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index edf9000cc..198f9d4cc 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -58,8 +58,9 @@ class ZoneState(object): child zone.""" self.last_seen = datetime.now() self.attempt = 0 - self.name = zone_metadata["name"] - self.capabilities = zone_metadata["capabilities"] + self.name = zone_metadata.get("name", "n/a") + self.capabilities = ", ".join(["%s=%s" % (k, v) + for k, v in zone_metadata.iteritems() if k != 'name']) self.is_active = True def to_dict(self): @@ -104,13 +105,37 @@ class ZoneManager(object): """Keeps the zone states updated.""" def __init__(self): self.last_zone_db_check = datetime.min - self.zone_states = {} + self.zone_states = {} # { <zone_id> : ZoneState } + self.service_states = {} # { <service> : { <host> : { cap k : v }}} self.green_pool = greenpool.GreenPool() def get_zone_list(self): """Return the list of zones we know about.""" return [zone.to_dict() for zone in self.zone_states.values()] + def get_zone_capabilities(self, context, service=None): + """Roll up all the individual host info to generic 'service' + capabilities. 
Each capability is aggregated into + <cap>_min and <cap>_max values.""" + service_dict = self.service_states + if service: + service_dict = {service: self.service_states.get(service, {})} + + # TODO(sandy) - be smarter about fabricating this structure. + # But it's likely to change once we understand what the Best-Match + # code will need better. + combined = {} # { <service>_<cap> : (min, max), ... } + for service_name, host_dict in service_dict.iteritems(): + for host, caps_dict in host_dict.iteritems(): + for cap, value in caps_dict.iteritems(): + key = "%s_%s" % (service_name, cap) + min_value, max_value = combined.get(key, (value, value)) + min_value = min(min_value, value) + max_value = max(max_value, value) + combined[key] = (min_value, max_value) + + return combined + def _refresh_from_db(self, context): """Make our zone state map match the db.""" # Add/update existing zones ... @@ -141,3 +166,11 @@ class ZoneManager(object): self.last_zone_db_check = datetime.now() self._refresh_from_db(context) self._poll_zones(context) + + def update_service_capabilities(self, service_name, host, capabilities): + """Update the per-service capabilities based on this notification.""" + logging.debug(_("Received %(service_name)s service update from " + "%(host)s: %(capabilities)s") % locals()) + service_caps = self.service_states.get(service_name, {}) + service_caps[host] = capabilities + self.service_states[service_name] = service_caps diff --git a/nova/service.py b/nova/service.py index 52bb15ad7..47c0b96c0 100644 --- a/nova/service.py +++ b/nova/service.py @@ -97,18 +97,24 @@ class Service(object): conn1 = rpc.Connection.instance(new=True) conn2 = rpc.Connection.instance(new=True) + conn3 = rpc.Connection.instance(new=True) if self.report_interval: - consumer_all = rpc.AdapterConsumer( + consumer_all = rpc.TopicAdapterConsumer( connection=conn1, topic=self.topic, proxy=self) - consumer_node = rpc.AdapterConsumer( + consumer_node = rpc.TopicAdapterConsumer( 
connection=conn2, topic='%s.%s' % (self.topic, self.host), proxy=self) + fanout = rpc.FanoutAdapterConsumer( + connection=conn3, + topic=self.topic, + proxy=self) self.timers.append(consumer_all.attach_to_eventlet()) self.timers.append(consumer_node.attach_to_eventlet()) + self.timers.append(fanout.attach_to_eventlet()) pulse = utils.LoopingCall(self.report_state) pulse.start(interval=self.report_interval, now=False) diff --git a/nova/test.py b/nova/test.py index d8a47464f..3b608520a 100644 --- a/nova/test.py +++ b/nova/test.py @@ -24,6 +24,7 @@ and some black magic for inline callbacks. import datetime +import functools import os import shutil import uuid @@ -32,6 +33,7 @@ import unittest import mox import shutil import stubout +from eventlet import greenthread from nova import context from nova import db @@ -39,6 +41,7 @@ from nova import fakerabbit from nova import flags from nova import rpc from nova import service +from nova import wsgi FLAGS = flags.FLAGS @@ -79,6 +82,7 @@ class TestCase(unittest.TestCase): self.injected = [] self._services = [] self._monkey_patch_attach() + self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() def tearDown(self): @@ -99,7 +103,8 @@ class TestCase(unittest.TestCase): self.reset_flags() # Reset our monkey-patches - rpc.Consumer.attach_to_eventlet = self.originalAttach + rpc.Consumer.attach_to_eventlet = self.original_attach + wsgi.Server.start = self.original_start # Stop any timers for x in self.injected: @@ -141,12 +146,90 @@ class TestCase(unittest.TestCase): return svc def _monkey_patch_attach(self): - self.originalAttach = rpc.Consumer.attach_to_eventlet + self.original_attach = rpc.Consumer.attach_to_eventlet - def _wrapped(innerSelf): - rv = self.originalAttach(innerSelf) + def _wrapped(inner_self): + rv = self.original_attach(inner_self) self.injected.append(rv) return rv - _wrapped.func_name = self.originalAttach.func_name + _wrapped.func_name = self.original_attach.func_name 
rpc.Consumer.attach_to_eventlet = _wrapped + + def _monkey_patch_wsgi(self): + """Allow us to kill servers spawned by wsgi.Server.""" + # TODO(termie): change these patterns to use functools + self.original_start = wsgi.Server.start + + @functools.wraps(self.original_start) + def _wrapped_start(inner_self, *args, **kwargs): + original_spawn_n = inner_self.pool.spawn_n + + @functools.wraps(original_spawn_n) + def _wrapped_spawn_n(*args, **kwargs): + rv = greenthread.spawn(*args, **kwargs) + self._services.append(rv) + + inner_self.pool.spawn_n = _wrapped_spawn_n + self.original_start(inner_self, *args, **kwargs) + inner_self.pool.spawn_n = original_spawn_n + + _wrapped_start.func_name = self.original_start.func_name + wsgi.Server.start = _wrapped_start + + # Useful assertions + def assertDictMatch(self, d1, d2): + """Assert two dicts are equivalent. + + This is a 'deep' match in the sense that it handles nested + dictionaries appropriately. + + NOTE: + + If you don't care (or don't know) a given value, you can specify + the string DONTCARE as the value. This will cause that dict-item + to be skipped. + """ + def raise_assertion(msg): + d1str = str(d1) + d2str = str(d2) + base_msg = ("Dictionaries do not match. %(msg)s d1: %(d1str)s " + "d2: %(d2str)s" % locals()) + raise AssertionError(base_msg) + + d1keys = set(d1.keys()) + d2keys = set(d2.keys()) + if d1keys != d2keys: + d1only = d1keys - d2keys + d2only = d2keys - d1keys + raise_assertion("Keys in d1 and not d2: %(d1only)s. 
" + "Keys in d2 and not d1: %(d2only)s" % locals()) + + for key in d1keys: + d1value = d1[key] + d2value = d2[key] + if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'): + self.assertDictMatch(d1value, d2value) + elif 'DONTCARE' in (d1value, d2value): + continue + elif d1value != d2value: + raise_assertion("d1['%(key)s']=%(d1value)s != " + "d2['%(key)s']=%(d2value)s" % locals()) + + def assertDictListMatch(self, L1, L2): + """Assert a list of dicts are equivalent""" + def raise_assertion(msg): + L1str = str(L1) + L2str = str(L2) + base_msg = ("List of dictionaries do not match: %(msg)s " + "L1: %(L1str)s L2: %(L2str)s" % locals()) + raise AssertionError(base_msg) + + L1count = len(L1) + L2count = len(L2) + if L1count != L2count: + raise_assertion("Length mismatch: len(L1)=%(L1count)d != " + "len(L2)=%(L2count)d" % locals()) + + for d1, d2 in zip(L1, L2): + self.assertDictMatch(d1, d2) diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py new file mode 100644 index 000000000..0860b51ac --- /dev/null +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -0,0 +1,98 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json + +from nova import wsgi + +from nova.api.openstack import extensions + + +class FoxInSocksController(wsgi.Controller): + + def index(self, req): + return "Try to say this Mr. Knox, sir..." + + +class Foxinsocks(object): + + def __init__(self): + pass + + def get_name(self): + return "Fox In Socks" + + def get_alias(self): + return "FOXNSOX" + + def get_description(self): + return "The Fox In Socks Extension" + + def get_namespace(self): + return "http://www.fox.in.socks/api/ext/pie/v1.0" + + def get_updated(self): + return "2011-01-22T13:25:27-06:00" + + def get_resources(self): + resources = [] + resource = extensions.ResourceExtension('foxnsocks', + FoxInSocksController()) + resources.append(resource) + return resources + + def get_actions(self): + actions = [] + actions.append(extensions.ActionExtension('servers', 'add_tweedle', + self._add_tweedle)) + actions.append(extensions.ActionExtension('servers', 'delete_tweedle', + self._delete_tweedle)) + return actions + + def get_response_extensions(self): + response_exts = [] + + def _goose_handler(res): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + data = json.loads(res.body) + data['flavor']['googoose'] = "Gooey goo for chewy chewing!" + return data + + resp_ext = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + _goose_handler) + response_exts.append(resp_ext) + + def _bands_handler(res): + #NOTE: This only handles JSON responses. + # You can use content type header to test for XML. + data = json.loads(res.body) + data['big_bands'] = 'Pig Bands!' + return data + + resp_ext2 = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + _bands_handler) + response_exts.append(resp_ext2) + return response_exts + + def _add_tweedle(self, input_dict, req, id): + + return "Tweedle Beetle Added." + + def _delete_tweedle(self, input_dict, req, id): + + return "Tweedle Beetle Deleted." 
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 56143114d..3cc68a536 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -143,6 +143,21 @@ def stub_out_compute_api_snapshot(stubs): stubs.Set(nova.compute.API, 'snapshot', snapshot) +def stub_out_glance_add_image(stubs, sent_to_glance): + """ + We return the metadata sent to glance by modifying the sent_to_glance dict + in place. + """ + orig_add_image = glance_client.Client.add_image + + def fake_add_image(context, metadata, data=None): + sent_to_glance['metadata'] = metadata + sent_to_glance['data'] = data + return orig_add_image(metadata, data) + + stubs.Set(glance_client.Client, 'add_image', fake_add_image) + + def stub_out_glance(stubs, initial_fixtures=None): class FakeGlanceClient: @@ -165,8 +180,9 @@ def stub_out_glance(stubs, initial_fixtures=None): def fake_add_image(self, image_meta, data=None): image_meta = copy.deepcopy(image_meta) - id = ''.join(random.choice(string.letters) for _ in range(20)) - image_meta['id'] = id + image_id = ''.join(random.choice(string.letters) + for _ in range(20)) + image_meta['id'] = image_id self.fixtures.append(image_meta) return image_meta @@ -185,9 +201,6 @@ def stub_out_glance(stubs, initial_fixtures=None): self.fixtures.remove(f) - ##def fake_delete_all(self): - ## self.fixtures = [] - def _find_image(self, image_id): for f in self.fixtures: if f['id'] == image_id: @@ -204,10 +217,10 @@ def stub_out_glance(stubs, initial_fixtures=None): stubs.Set(GlanceClient, 'add_image', fake.fake_add_image) stubs.Set(GlanceClient, 'update_image', fake.fake_update_image) stubs.Set(GlanceClient, 'delete_image', fake.fake_delete_image) - #stubs.Set(GlanceClient, 'delete_all', fake.fake_delete_all) class FakeToken(object): + # FIXME(sirp): let's not use id here id = 0 def __init__(self, **kwargs): diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py new file 
mode 100644 index 000000000..481d34ed1 --- /dev/null +++ b/nova/tests/api/openstack/test_extensions.py @@ -0,0 +1,236 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import stubout +import unittest +import webob +import os.path + +from nova import context +from nova import flags +from nova.api import openstack +from nova.api.openstack import extensions +from nova.api.openstack import flavors +from nova.tests.api.openstack import fakes +import nova.wsgi + +FLAGS = flags.FLAGS + +response_body = "Try to say this Mr. Knox, sir..." 
+ + +class StubController(nova.wsgi.Controller): + + def __init__(self, body): + self.body = body + + def index(self, req): + return self.body + + +class StubExtensionManager(object): + + def __init__(self, resource_ext=None, action_ext=None, response_ext=None): + self.resource_ext = resource_ext + self.action_ext = action_ext + self.response_ext = response_ext + + def get_name(self): + return "Tweedle Beetle Extension" + + def get_alias(self): + return "TWDLBETL" + + def get_description(self): + return "Provides access to Tweedle Beetles" + + def get_resources(self): + resource_exts = [] + if self.resource_ext: + resource_exts.append(self.resource_ext) + return resource_exts + + def get_actions(self): + action_exts = [] + if self.action_ext: + action_exts.append(self.action_ext) + return action_exts + + def get_response_extensions(self): + response_exts = [] + if self.response_ext: + response_exts.append(self.response_ext) + return response_exts + + +class ExtensionControllerTest(unittest.TestCase): + + def test_index(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/extensions") + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + + def test_get_by_alias(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/extensions/FOXNSOX") + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + + +class ResourceExtensionTest(unittest.TestCase): + + def test_no_extension_present(self): + manager = StubExtensionManager(None) + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app, manager) + request = webob.Request.blank("/blah") + response = request.get_response(ext_midware) + self.assertEqual(404, response.status_int) + + def test_get_resources(self): + res_ext = extensions.ResourceExtension('tweedles', + 
StubController(response_body)) + manager = StubExtensionManager(res_ext) + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app, manager) + request = webob.Request.blank("/tweedles") + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + self.assertEqual(response_body, response.body) + + def test_get_resources_with_controller(self): + res_ext = extensions.ResourceExtension('tweedles', + StubController(response_body)) + manager = StubExtensionManager(res_ext) + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app, manager) + request = webob.Request.blank("/tweedles") + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + self.assertEqual(response_body, response.body) + + +class ExtensionManagerTest(unittest.TestCase): + + response_body = "Try to say this Mr. Knox, sir..." + + def setUp(self): + FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__), + "extensions") + + def test_get_resources(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/foxnsocks") + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + self.assertEqual(response_body, response.body) + + +class ActionExtensionTest(unittest.TestCase): + + def setUp(self): + FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__), + "extensions") + + def _send_server_action_request(self, url, body): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank(url) + request.method = 'POST' + request.content_type = 'application/json' + request.body = json.dumps(body) + response = request.get_response(ext_midware) + return response + + def test_extended_action(self): + body = dict(add_tweedle=dict(name="test")) + response = self._send_server_action_request("/servers/1/action", body) + 
self.assertEqual(200, response.status_int) + self.assertEqual("Tweedle Beetle Added.", response.body) + + body = dict(delete_tweedle=dict(name="test")) + response = self._send_server_action_request("/servers/1/action", body) + self.assertEqual(200, response.status_int) + self.assertEqual("Tweedle Beetle Deleted.", response.body) + + def test_invalid_action_body(self): + body = dict(blah=dict(name="test")) # Doesn't exist + response = self._send_server_action_request("/servers/1/action", body) + self.assertEqual(501, response.status_int) + + def test_invalid_action(self): + body = dict(blah=dict(name="test")) + response = self._send_server_action_request("/asdf/1/action", body) + self.assertEqual(404, response.status_int) + + +class ResponseExtensionTest(unittest.TestCase): + + def setUp(self): + super(ResponseExtensionTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_auth(self.stubs) + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(ResponseExtensionTest, self).tearDown() + + def test_get_resources_with_stub_mgr(self): + + test_resp = "Gooey goo for chewy chewing!" 
+ + def _resp_handler(res): + # only handle JSON responses + data = json.loads(res.body) + data['flavor']['googoose'] = test_resp + return data + + resp_ext = extensions.ResponseExtension('GET', + '/v1.1/flavors/:(id)', + _resp_handler) + + manager = StubExtensionManager(None, None, resp_ext) + app = fakes.wsgi_app() + ext_midware = extensions.ExtensionMiddleware(app, manager) + request = webob.Request.blank("/v1.1/flavors/1") + request.environ['api.version'] = '1.1' + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + response_data = json.loads(response.body) + self.assertEqual(test_resp, response_data['flavor']['googoose']) + + def test_get_resources_with_mgr(self): + + test_resp = "Gooey goo for chewy chewing!" + + app = fakes.wsgi_app() + ext_midware = extensions.ExtensionMiddleware(app) + request = webob.Request.blank("/v1.1/flavors/1") + request.environ['api.version'] = '1.1' + response = request.get_response(ext_midware) + self.assertEqual(200, response.status_int) + response_data = json.loads(response.body) + self.assertEqual(test_resp, response_data['flavor']['googoose']) + self.assertEqual("Pig Bands!", response_data['big_bands']) diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 4f504808c..954d72adf 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -19,11 +19,10 @@ import json import stubout import webob -from nova import test -import nova.api +import nova.db.api from nova import context -from nova.api.openstack import flavors -from nova import db +from nova import exception +from nova import test from nova.tests.api.openstack import fakes @@ -48,6 +47,10 @@ def return_instance_types(context, num=2): return instance_types +def return_instance_type_not_found(context, flavorid): + raise exception.NotFound() + + class FlavorsTest(test.TestCase): def setUp(self): super(FlavorsTest, self).setUp() @@ -67,7 +70,7 @@ 
class FlavorsTest(test.TestCase): self.stubs.UnsetAll() super(FlavorsTest, self).tearDown() - def test_get_flavor_list(self): + def test_get_flavor_list_v1_0(self): req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) @@ -84,7 +87,7 @@ class FlavorsTest(test.TestCase): ] self.assertEqual(flavors, expected) - def test_get_flavor_list_detail(self): + def test_get_flavor_list_detail_v1_0(self): req = webob.Request.blank('/v1.0/flavors/detail') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) @@ -105,7 +108,7 @@ class FlavorsTest(test.TestCase): ] self.assertEqual(flavors, expected) - def test_get_flavor_by_id(self): + def test_get_flavor_by_id_v1_0(self): req = webob.Request.blank('/v1.0/flavors/12') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) @@ -117,3 +120,144 @@ class FlavorsTest(test.TestCase): "disk": "10", } self.assertEqual(flavor, expected) + + def test_get_flavor_by_invalid_id(self): + self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id", + return_instance_type_not_found) + req = webob.Request.blank('/v1.0/flavors/asdf') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) + + def test_get_flavor_by_id_v1_1(self): + req = webob.Request.blank('/v1.1/flavors/12') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavor = json.loads(res.body)["flavor"] + expected = { + "id": "12", + "name": "flavor 12", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/12", + }, + { + "rel": "bookmark", + "type": "application/json", + "href": "http://localhost/v1.1/flavors/12", + }, + { + "rel": "bookmark", + "type": "application/xml", + "href": "http://localhost/v1.1/flavors/12", + }, + ], + } + self.assertEqual(flavor, expected) + + def test_get_flavor_list_v1_1(self): + req = 
webob.Request.blank('/v1.1/flavors') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavor = json.loads(res.body)["flavors"] + expected = [ + { + "id": "1", + "name": "flavor 1", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "type": "application/json", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "type": "application/xml", + "href": "http://localhost/v1.1/flavors/1", + }, + ], + }, + { + "id": "2", + "name": "flavor 2", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": "bookmark", + "type": "application/json", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": "bookmark", + "type": "application/xml", + "href": "http://localhost/v1.1/flavors/2", + }, + ], + }, + ] + self.assertEqual(flavor, expected) + + def test_get_flavor_list_detail_v1_1(self): + req = webob.Request.blank('/v1.1/flavors/detail') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavor = json.loads(res.body)["flavors"] + expected = [ + { + "id": "1", + "name": "flavor 1", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "type": "application/json", + "href": "http://localhost/v1.1/flavors/1", + }, + { + "rel": "bookmark", + "type": "application/xml", + "href": "http://localhost/v1.1/flavors/1", + }, + ], + }, + { + "id": "2", + "name": "flavor 2", + "ram": "256", + "disk": "10", + "links": [ + { + "rel": "self", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": "bookmark", + "type": "application/json", + "href": "http://localhost/v1.1/flavors/2", + }, + { + "rel": "bookmark", + "type": "application/xml", + "href": "http://localhost/v1.1/flavors/2", + }, + ], + }, + ] + self.assertEqual(flavor, 
expected) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index feb32ed9f..1cdccadd6 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -29,6 +29,7 @@ import tempfile import stubout import webob +from glance import client as glance_client from nova import context from nova import exception from nova import flags @@ -43,78 +44,44 @@ FLAGS = flags.FLAGS class BaseImageServiceTests(object): - """Tasks to test for all image services""" def test_create(self): - - fixture = {'name': 'test image', - 'updated': None, - 'created': None, - 'status': None, - 'instance_id': None, - 'progress': None} - + fixture = self._make_fixture('test image') num_images = len(self.service.index(self.context)) - id = self.service.create(self.context, fixture)['id'] + image_id = self.service.create(self.context, fixture)['id'] - self.assertNotEquals(None, id) + self.assertNotEquals(None, image_id) self.assertEquals(num_images + 1, len(self.service.index(self.context))) def test_create_and_show_non_existing_image(self): - - fixture = {'name': 'test image', - 'updated': None, - 'created': None, - 'status': None, - 'instance_id': None, - 'progress': None} - + fixture = self._make_fixture('test image') num_images = len(self.service.index(self.context)) - id = self.service.create(self.context, fixture)['id'] - - self.assertNotEquals(None, id) + image_id = self.service.create(self.context, fixture)['id'] + self.assertNotEquals(None, image_id) self.assertRaises(exception.NotFound, self.service.show, self.context, 'bad image id') def test_update(self): - - fixture = {'name': 'test image', - 'updated': None, - 'created': None, - 'status': None, - 'instance_id': None, - 'progress': None} - - id = self.service.create(self.context, fixture)['id'] - + fixture = self._make_fixture('test image') + image_id = self.service.create(self.context, fixture)['id'] fixture['status'] = 'in progress' - 
self.service.update(self.context, id, fixture) - new_image_data = self.service.show(self.context, id) + self.service.update(self.context, image_id, fixture) + + new_image_data = self.service.show(self.context, image_id) self.assertEquals('in progress', new_image_data['status']) def test_delete(self): - - fixtures = [ - {'name': 'test image 1', - 'updated': None, - 'created': None, - 'status': None, - 'instance_id': None, - 'progress': None}, - {'name': 'test image 2', - 'updated': None, - 'created': None, - 'status': None, - 'instance_id': None, - 'progress': None}] + fixture1 = self._make_fixture('test image 1') + fixture2 = self._make_fixture('test image 2') + fixtures = [fixture1, fixture2] num_images = len(self.service.index(self.context)) self.assertEquals(0, num_images, str(self.service.index(self.context))) @@ -132,6 +99,22 @@ class BaseImageServiceTests(object): num_images = len(self.service.index(self.context)) self.assertEquals(1, num_images) + def test_index(self): + fixture = self._make_fixture('test image') + image_id = self.service.create(self.context, fixture)['id'] + image_metas = self.service.index(self.context) + expected = [{'id': 'DONTCARE', 'name': 'test image'}] + self.assertDictListMatch(image_metas, expected) + + @staticmethod + def _make_fixture(name): + fixture = {'name': 'test image', + 'updated': None, + 'created': None, + 'status': None, + 'is_public': True} + return fixture + class LocalImageServiceTest(test.TestCase, BaseImageServiceTests): @@ -167,8 +150,17 @@ class LocalImageServiceTest(test.TestCase, class GlanceImageServiceTest(test.TestCase, BaseImageServiceTests): - """Tests the local image service""" + """Tests the Glance image service, in particular that metadata translation + works properly. + + At a high level, the translations involved are: + 1. Glance -> ImageService - This is needed so we can support + multple ImageServices (Glance, Local, etc) + + 2. 
ImageService -> API - This is needed so we can support multple + APIs (OpenStack, EC2) + """ def setUp(self): super(GlanceImageServiceTest, self).setUp() self.stubs = stubout.StubOutForTesting() @@ -176,41 +168,53 @@ class GlanceImageServiceTest(test.TestCase, fakes.stub_out_compute_api_snapshot(self.stubs) service_class = 'nova.image.glance.GlanceImageService' self.service = utils.import_object(service_class) - self.context = context.RequestContext(None, None) + self.context = context.RequestContext(1, None) self.service.delete_all() + self.sent_to_glance = {} + fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance) def tearDown(self): self.stubs.UnsetAll() super(GlanceImageServiceTest, self).tearDown() + def test_create_with_instance_id(self): + """Ensure instance_id is persisted as an image-property""" + fixture = {'name': 'test image', + 'is_public': False, + 'properties': {'instance_id': '42', 'user_id': '1'}} -class ImageControllerWithGlanceServiceTest(test.TestCase): + image_id = self.service.create(self.context, fixture)['id'] + expected = fixture + self.assertDictMatch(self.sent_to_glance['metadata'], expected) + + image_meta = self.service.show(self.context, image_id) + expected = {'id': image_id, + 'name': 'test image', + 'is_public': False, + 'properties': {'instance_id': '42', 'user_id': '1'}} + self.assertDictMatch(image_meta, expected) + + image_metas = self.service.detail(self.context) + self.assertDictMatch(image_metas[0], expected) + + def test_create_without_instance_id(self): + """ + Ensure we can create an image without having to specify an + instance_id. Public images are an example of an image not tied to an + instance. 
+ """ + fixture = {'name': 'test image'} + image_id = self.service.create(self.context, fixture)['id'] + + expected = {'name': 'test image', 'properties': {}} + self.assertDictMatch(self.sent_to_glance['metadata'], expected) + +class ImageControllerWithGlanceServiceTest(test.TestCase): """Test of the OpenStack API /images application controller""" - # Registered images at start of each test. - now = datetime.datetime.utcnow() - IMAGE_FIXTURES = [ - {'id': '23g2ogk23k4hhkk4k42l', - 'imageId': '23g2ogk23k4hhkk4k42l', - 'name': 'public image #1', - 'created_at': now.isoformat(), - 'updated_at': now.isoformat(), - 'deleted_at': None, - 'deleted': False, - 'is_public': True, - 'status': 'available', - 'image_type': 'kernel'}, - {'id': 'slkduhfas73kkaskgdas', - 'imageId': 'slkduhfas73kkaskgdas', - 'name': 'public image #2', - 'created_at': now.isoformat(), - 'updated_at': now.isoformat(), - 'deleted_at': None, - 'deleted': False, - 'is_public': True, - 'status': 'available', - 'image_type': 'ramdisk'}] + NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" + NOW_API_FORMAT = "2010-10-11T10:30:22Z" def setUp(self): super(ImageControllerWithGlanceServiceTest, self).setUp() @@ -223,7 +227,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) - fakes.stub_out_glance(self.stubs, initial_fixtures=self.IMAGE_FIXTURES) + fixtures = self._make_image_fixtures() + fakes.stub_out_glance(self.stubs, initial_fixtures=fixtures) def tearDown(self): self.stubs.UnsetAll() @@ -233,34 +238,94 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def test_get_image_index(self): req = webob.Request.blank('/v1.0/images') res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) + image_metas = json.loads(res.body)['images'] - fixture_index = [dict(id=f['id'], name=f['name']) for f - in self.IMAGE_FIXTURES] + expected = [{'id': 123, 'name': 'public image'}, 
+ {'id': 124, 'name': 'queued backup'}, + {'id': 125, 'name': 'saving backup'}, + {'id': 126, 'name': 'active backup'}, + {'id': 127, 'name': 'killed backup'}] - for image in res_dict['images']: - self.assertEquals(1, fixture_index.count(image), - "image %s not in fixture index!" % str(image)) + self.assertDictListMatch(image_metas, expected) def test_get_image_details(self): req = webob.Request.blank('/v1.0/images/detail') res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) - - for image in self.IMAGE_FIXTURES: - expected = { - 'id': abs(hash(image['imageId'])), - 'name': image['name'], - 'status': 'active', - } - self.assertTrue(expected in res_dict['images']) - - def test_show_image(self): - expected = self.IMAGE_FIXTURES[0] - id = abs(hash(expected['id'])) - expected_time = self.now.strftime('%Y-%m-%dT%H:%M:%SZ') - req = webob.Request.blank('/v1.0/images/%s' % id) + image_metas = json.loads(res.body)['images'] + + now = self.NOW_API_FORMAT + expected = [ + {'id': 123, 'name': 'public image', 'updated': now, + 'created': now, 'status': 'ACTIVE'}, + {'id': 124, 'name': 'queued backup', 'serverId': 42, + 'updated': now, 'created': now, + 'status': 'QUEUED'}, + {'id': 125, 'name': 'saving backup', 'serverId': 42, + 'updated': now, 'created': now, + 'status': 'SAVING', 'progress': 0}, + {'id': 126, 'name': 'active backup', 'serverId': 42, + 'updated': now, 'created': now, + 'status': 'ACTIVE'}, + {'id': 127, 'name': 'killed backup', 'serverId': 42, + 'updated': now, 'created': now, + 'status': 'FAILED'} + ] + + self.assertDictListMatch(image_metas, expected) + + def test_get_image_found(self): + req = webob.Request.blank('/v1.0/images/123') + res = req.get_response(fakes.wsgi_app()) + image_meta = json.loads(res.body)['image'] + expected = {'id': 123, 'name': 'public image', + 'updated': self.NOW_API_FORMAT, + 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE'} + self.assertDictMatch(image_meta, expected) + + def 
test_get_image_non_existent(self): + req = webob.Request.blank('/v1.0/images/4242') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 404) + + def test_get_image_not_owned(self): + """We should return a 404 if we request an image that doesn't belong + to us + """ + req = webob.Request.blank('/v1.0/images/128') res = req.get_response(fakes.wsgi_app()) - actual = json.loads(res.body)['image'] - self.assertEqual(expected_time, actual['created_at']) - self.assertEqual(expected_time, actual['updated_at']) + self.assertEqual(res.status_int, 404) + + @classmethod + def _make_image_fixtures(cls): + image_id = 123 + base_attrs = {'created_at': cls.NOW_GLANCE_FORMAT, + 'updated_at': cls.NOW_GLANCE_FORMAT, + 'deleted_at': None, + 'deleted': False} + + fixtures = [] + + def add_fixture(**kwargs): + kwargs.update(base_attrs) + fixtures.append(kwargs) + + # Public image + add_fixture(id=image_id, name='public image', is_public=True, + status='active', properties={}) + image_id += 1 + + # Backup for User 1 + backup_properties = {'instance_id': '42', 'user_id': '1'} + for status in ('queued', 'saving', 'active', 'killed'): + add_fixture(id=image_id, name='%s backup' % status, + is_public=False, status=status, + properties=backup_properties) + image_id += 1 + + # Backup for User 2 + other_backup_properties = {'instance_id': '43', 'user_id': '2'} + add_fixture(id=image_id, name='someone elses backup', is_public=False, + status='active', properties=other_backup_properties) + image_id += 1 + + return fixtures diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py new file mode 100644 index 000000000..c8d456472 --- /dev/null +++ b/nova/tests/api/openstack/test_server_metadata.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import stubout +import unittest +import webob + + +from nova.api import openstack +from nova.tests.api.openstack import fakes +import nova.wsgi + + +def return_create_instance_metadata(context, server_id, metadata): + return stub_server_metadata() + + +def return_server_metadata(context, server_id): + return stub_server_metadata() + + +def return_empty_server_metadata(context, server_id): + return {} + + +def delete_server_metadata(context, server_id, key): + pass + + +def stub_server_metadata(): + metadata = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5" + } + return metadata + + +class ServerMetaDataTest(unittest.TestCase): + + def setUp(self): + super(ServerMetaDataTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_auth(self.stubs) + fakes.stub_out_key_pair_funcs(self.stubs) + + def tearDown(self): + self.stubs.UnsetAll() + super(ServerMetaDataTest, self).tearDown() + + def test_index(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_server_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('value1', res_dict['metadata']['key1']) + + def 
test_index_no_data(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_empty_server_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual(0, len(res_dict['metadata'])) + + def test_show(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_server_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta/key5') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('value5', res_dict['key5']) + + def test_show_meta_not_found(self): + self.stubs.Set(nova.db.api, 'instance_metadata_get', + return_empty_server_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta/key6') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(404, res.status_int) + + def test_delete(self): + self.stubs.Set(nova.db.api, 'instance_metadata_delete', + delete_server_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta/key5') + req.environ['api.version'] = '1.1' + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + + def test_create(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta') + req.environ['api.version'] = '1.1' + req.method = 'POST' + req.body = '{"metadata": {"key1": "value1"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('value1', res_dict['metadata']['key1']) + + def test_update_item(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + 
return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta/key1') + req.environ['api.version'] = '1.1' + req.method = 'PUT' + req.body = '{"key1": "value1"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('value1', res_dict['key1']) + + def test_update_item_too_many_keys(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta/key1') + req.environ['api.version'] = '1.1' + req.method = 'PUT' + req.body = '{"key1": "value1", "key2": "value2"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta/bad') + req.environ['api.version'] = '1.1' + req.method = 'PUT' + req.body = '{"key1": "value1"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3f160df85..cfff77198 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -294,6 +294,36 @@ class ServersTest(test.TestCase): servers = json.loads(res.body)['servers'] self.assertEqual([s['id'] for s in servers], [1, 2]) + def test_get_servers_with_bad_limit(self): + req = webob.Request.blank('/v1.0/servers?limit=asdf&offset=1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue(res.body.find('limit param') > -1) + + def test_get_servers_with_bad_offset(self): + req = 
webob.Request.blank('/v1.0/servers?limit=2&offset=asdf') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue(res.body.find('offset param') > -1) + + def test_get_servers_with_marker(self): + req = webob.Request.blank('/v1.1/servers?marker=2') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [3, 4]) + + def test_get_servers_with_limit_and_marker(self): + req = webob.Request.blank('/v1.1/servers?limit=2&marker=1') + res = req.get_response(fakes.wsgi_app()) + servers = json.loads(res.body)['servers'] + self.assertEqual([s['id'] for s in servers], [2, 3]) + + def test_get_servers_with_bad_marker(self): + req = webob.Request.blank('/v1.1/servers?limit=2&marker=asdf') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + self.assertTrue(res.body.find('marker param') > -1) + def _setup_for_create_instance(self): """Shared implementation for tests below that create instance""" def instance_create(context, inst): diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 38399bb3f..a3f191aaa 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -75,6 +75,10 @@ def zone_get_all_db(context): ] +def zone_capabilities(method, context, params): + return dict() + + class ZonesTest(test.TestCase): def setUp(self): super(ZonesTest, self).setUp() @@ -93,13 +97,18 @@ class ZonesTest(test.TestCase): self.stubs.Set(nova.db, 'zone_create', zone_create) self.stubs.Set(nova.db, 'zone_delete', zone_delete) + self.old_zone_name = FLAGS.zone_name + self.old_zone_capabilities = FLAGS.zone_capabilities + def tearDown(self): self.stubs.UnsetAll() FLAGS.allow_admin_api = self.allow_admin + FLAGS.zone_name = self.old_zone_name + FLAGS.zone_capabilities = self.old_zone_capabilities super(ZonesTest, self).tearDown() def 
test_get_zone_list_scheduler(self): - self.stubs.Set(api.API, '_call_scheduler', zone_get_all_scheduler) + self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler) req = webob.Request.blank('/v1.0/zones') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -108,8 +117,7 @@ class ZonesTest(test.TestCase): self.assertEqual(len(res_dict['zones']), 2) def test_get_zone_list_db(self): - self.stubs.Set(api.API, '_call_scheduler', - zone_get_all_scheduler_empty) + self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler_empty) self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db) req = webob.Request.blank('/v1.0/zones') req.headers["Content-Type"] = "application/json" @@ -167,3 +175,18 @@ class ZonesTest(test.TestCase): self.assertEqual(res_dict['zone']['id'], 1) self.assertEqual(res_dict['zone']['api_url'], 'http://example.com') self.assertFalse('username' in res_dict['zone']) + + def test_zone_info(self): + FLAGS.zone_name = 'darksecret' + FLAGS.zone_capabilities = ['cap1=a;b', 'cap2=c;d'] + self.stubs.Set(api, '_call_scheduler', zone_capabilities) + + body = dict(zone=dict(username='zeb', password='sneaky')) + req = webob.Request.blank('/v1.0/zones/info') + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['zone']['name'], 'darksecret') + self.assertEqual(res_dict['zone']['cap1'], 'a;b') + self.assertEqual(res_dict['zone']['cap2'], 'c;d') diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index f1f8504f3..d03aa9cc8 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -19,6 +19,8 @@ import datetime import unittest +from nova import context +from nova import test from nova.image import glance @@ -29,14 +31,14 @@ class StubGlanceClient(object): self.add_response = add_response self.update_response = update_response - def get_image_meta(self, id): - return self.images[id] + def 
get_image_meta(self, image_id): + return self.images[image_id] def get_images_detailed(self): return self.images.itervalues() - def get_image(self, id): - return self.images[id], [] + def get_image(self, image_id): + return self.images[image_id], [] def add_image(self, metadata, data): return self.add_response @@ -46,143 +48,144 @@ class StubGlanceClient(object): class NullWriter(object): + """Used to test ImageService.get which takes a writer object""" def write(self, *arg, **kwargs): pass -class TestGlanceImageServiceDatetimes(unittest.TestCase): +class BaseGlanceTest(unittest.TestCase): + NOW_GLANCE_FORMAT = "2010-10-11T10:30:22" + NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22) def setUp(self): + # FIXME(sirp): we can probably use stubs library here rather than + # dependency injection self.client = StubGlanceClient(None) self.service = glance.GlanceImageService(self.client) + self.context = context.RequestContext(None, None) + def assertDateTimesFilled(self, image_meta): + self.assertEqual(image_meta['created_at'], self.NOW_DATETIME) + self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME) + self.assertEqual(image_meta['deleted_at'], self.NOW_DATETIME) + + def assertDateTimesEmpty(self, image_meta): + self.assertEqual(image_meta['updated_at'], None) + self.assertEqual(image_meta['deleted_at'], None) + + +class TestGlanceImageServiceProperties(BaseGlanceTest): def test_show_passes_through_to_client(self): - self.client.images = {'xyz': {'foo': 'bar'}} - self.assertEqual(self.service.show({}, 'xyz'), {'foo': 'bar'}) + """Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the + properties dict + """ + fixtures = {'image1': {'name': 'image1', 'is_public': True, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + image_meta = self.service.show(self.context, 'image1') + + expected = {'name': 'image1', 'is_public': True, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + 
self.assertEqual(image_meta, expected) def test_detail_passes_through_to_client(self): - self.client.images = {1: {'foo': 'bar'}} - self.assertEqual(list(self.service.detail({})), [{'foo': 'bar'}]) - - def test_show_makes_create_datetimes(self): - create_time = datetime.datetime.utcnow() - self.client.images = {'xyz': { - 'id': "id", - 'name': "my awesome image", - 'created_at': create_time.isoformat(), - }} - actual = self.service.show({}, 'xyz') - self.assertEqual(actual['created_at'], create_time) - - def test_show_makes_update_datetimes(self): - update_time = datetime.datetime.utcnow() - self.client.images = {'abc': { - 'id': "id", - 'name': "my okay image", - 'updated_at': update_time.isoformat(), - }} - actual = self.service.show({}, 'abc') - self.assertEqual(actual['updated_at'], update_time) - - def test_show_makes_delete_datetimes(self): - delete_time = datetime.datetime.utcnow() - self.client.images = {'123': { - 'id': "123", - 'name': "my lame image", - 'deleted_at': delete_time.isoformat(), - }} - actual = self.service.show({}, '123') - self.assertEqual(actual['deleted_at'], delete_time) - - def test_show_handles_deleted_at_none(self): - self.client.images = {'747': { - 'id': "747", - 'name': "not deleted", - 'deleted_at': None, - }} - actual = self.service.show({}, '747') - self.assertEqual(actual['deleted_at'], None) - - def test_detail_handles_timestamps(self): - now = datetime.datetime.utcnow() - image1 = { - 'id': 1, - 'name': 'image 1', - 'created_at': now.isoformat(), - 'updated_at': now.isoformat(), - 'deleted_at': None, - } - image2 = { - 'id': 2, - 'name': 'image 2', - 'deleted_at': now.isoformat(), - } - self.client.images = {1: image1, 2: image2} - i1, i2 = self.service.detail({}) - self.assertEqual(i1['created_at'], now) - self.assertEqual(i1['updated_at'], now) - self.assertEqual(i1['deleted_at'], None) - self.assertEqual(i2['deleted_at'], now) - - def test_get_handles_timestamps(self): - now = datetime.datetime.utcnow() - 
self.client.images = {'abcd': { - 'id': 'abcd', - 'name': 'nifty image', - 'created_at': now.isoformat(), - 'updated_at': now.isoformat(), - 'deleted_at': now.isoformat(), - }} - actual = self.service.get({}, 'abcd', NullWriter()) - for attr in ('created_at', 'updated_at', 'deleted_at'): - self.assertEqual(actual[attr], now) - - def test_get_handles_deleted_at_none(self): - self.client.images = {'abcd': {'deleted_at': None}} - actual = self.service.get({}, 'abcd', NullWriter()) - self.assertEqual(actual['deleted_at'], None) - - def test_create_handles_timestamps(self): - now = datetime.datetime.utcnow() - self.client.add_response = { - 'id': 'abcd', - 'name': 'blah', - 'created_at': now.isoformat(), - 'updated_at': now.isoformat(), - 'deleted_at': now.isoformat(), - } - actual = self.service.create({}, {}) - for attr in ('created_at', 'updated_at', 'deleted_at'): - self.assertEqual(actual[attr], now) - - def test_create_handles_deleted_at_none(self): - self.client.add_response = { - 'id': 'abcd', - 'name': 'blah', - 'deleted_at': None, - } - actual = self.service.create({}, {}) - self.assertEqual(actual['deleted_at'], None) - - def test_update_handles_timestamps(self): - now = datetime.datetime.utcnow() - self.client.update_response = { - 'id': 'abcd', - 'name': 'blah', - 'created_at': now.isoformat(), - 'updated_at': now.isoformat(), - 'deleted_at': now.isoformat(), - } - actual = self.service.update({}, 'dummy_id', {}) - for attr in ('created_at', 'updated_at', 'deleted_at'): - self.assertEqual(actual[attr], now) - - def test_create_handles_deleted_at_none(self): - self.client.update_response = { - 'id': 'abcd', - 'name': 'blah', - 'deleted_at': None, - } - actual = self.service.update({}, 'dummy_id', {}) - self.assertEqual(actual['deleted_at'], None) + fixtures = {'image1': {'name': 'image1', 'is_public': True, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + image_meta = self.service.detail(self.context) + expected = 
[{'name': 'image1', 'is_public': True, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}] + self.assertEqual(image_meta, expected) + + +class TestGetterDateTimeNoneTests(BaseGlanceTest): + + def test_show_handles_none_datetimes(self): + self.client.images = self._make_none_datetime_fixtures() + image_meta = self.service.show(self.context, 'image1') + self.assertDateTimesEmpty(image_meta) + + def test_detail_handles_none_datetimes(self): + self.client.images = self._make_none_datetime_fixtures() + image_meta = self.service.detail(self.context)[0] + self.assertDateTimesEmpty(image_meta) + + def test_get_handles_none_datetimes(self): + self.client.images = self._make_none_datetime_fixtures() + writer = NullWriter() + image_meta = self.service.get(self.context, 'image1', writer) + self.assertDateTimesEmpty(image_meta) + + def test_show_makes_datetimes(self): + self.client.images = self._make_datetime_fixtures() + image_meta = self.service.show(self.context, 'image1') + self.assertDateTimesFilled(image_meta) + + def test_detail_makes_datetimes(self): + self.client.images = self._make_datetime_fixtures() + image_meta = self.service.detail(self.context)[0] + self.assertDateTimesFilled(image_meta) + + def test_get_makes_datetimes(self): + self.client.images = self._make_datetime_fixtures() + writer = NullWriter() + image_meta = self.service.get(self.context, 'image1', writer) + self.assertDateTimesFilled(image_meta) + + def _make_datetime_fixtures(self): + fixtures = {'image1': {'name': 'image1', 'is_public': True, + 'created_at': self.NOW_GLANCE_FORMAT, + 'updated_at': self.NOW_GLANCE_FORMAT, + 'deleted_at': self.NOW_GLANCE_FORMAT}} + return fixtures + + def _make_none_datetime_fixtures(self): + fixtures = {'image1': {'name': 'image1', 'is_public': True, + 'updated_at': None, + 'deleted_at': None}} + return fixtures + + +class TestMutatorDateTimeTests(BaseGlanceTest): + """Tests create(), update()""" + + def test_create_handles_datetimes(self): + 
self.client.add_response = self._make_datetime_fixture() + image_meta = self.service.create(self.context, {}) + self.assertDateTimesFilled(image_meta) + + def test_create_handles_none_datetimes(self): + self.client.add_response = self._make_none_datetime_fixture() + dummy_meta = {} + image_meta = self.service.create(self.context, dummy_meta) + self.assertDateTimesEmpty(image_meta) + + def test_update_handles_datetimes(self): + self.client.update_response = self._make_datetime_fixture() + dummy_id = 'dummy_id' + dummy_meta = {} + image_meta = self.service.update(self.context, 'dummy_id', dummy_meta) + self.assertDateTimesFilled(image_meta) + + def test_update_handles_none_datetimes(self): + self.client.update_response = self._make_none_datetime_fixture() + dummy_id = 'dummy_id' + dummy_meta = {} + image_meta = self.service.update(self.context, 'dummy_id', dummy_meta) + self.assertDateTimesEmpty(image_meta) + + def _make_datetime_fixture(self): + fixture = {'id': 'image1', 'name': 'image1', 'is_public': True, + 'created_at': self.NOW_GLANCE_FORMAT, + 'updated_at': self.NOW_GLANCE_FORMAT, + 'deleted_at': self.NOW_GLANCE_FORMAT} + return fixture + + def _make_none_datetime_fixture(self): + fixture = {'id': 'image1', 'name': 'image1', 'is_public': True, + 'updated_at': None, + 'deleted_at': None} + return fixture diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 47093636e..cc7326e73 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -75,8 +75,6 @@ class TestUser(object): class IntegratedUnitTestContext(object): - __INSTANCE = None - def __init__(self): self.auth_manager = manager.AuthManager() @@ -92,7 +90,6 @@ class IntegratedUnitTestContext(object): def setup(self): self._start_services() - self._create_test_user() def _create_test_user(self): @@ -109,14 +106,6 @@ class IntegratedUnitTestContext(object): self._start_api_service() def cleanup(self): - 
for service in self.services: - service.kill() - self.services = [] - # TODO(justinsb): Shutdown WSGI & anything else we startup - # bug731668 - # WSGI shutdown broken :-( - # self.wsgi_server.terminate() - # self.wsgi_server = None self.test_user = None def _create_unittest_user(self): @@ -150,39 +139,8 @@ class IntegratedUnitTestContext(object): if not api_service: raise Exception("API Service was None") - # WSGI shutdown broken :-( - #self.services.append(volume_service) self.api_service = api_service self.auth_url = 'http://localhost:8774/v1.0' return api_service - - # WSGI shutdown broken :-( - # bug731668 - #@staticmethod - #def get(): - # if not IntegratedUnitTestContext.__INSTANCE: - # IntegratedUnitTestContext.startup() - # #raise Error("Must call IntegratedUnitTestContext::startup") - # return IntegratedUnitTestContext.__INSTANCE - - @staticmethod - def startup(): - # Because WSGI shutdown is broken at the moment, we have to recycle - # bug731668 - if IntegratedUnitTestContext.__INSTANCE: - #raise Error("Multiple calls to IntegratedUnitTestContext.startup") - IntegratedUnitTestContext.__INSTANCE.setup() - else: - IntegratedUnitTestContext.__INSTANCE = IntegratedUnitTestContext() - return IntegratedUnitTestContext.__INSTANCE - - @staticmethod - def shutdown(): - if not IntegratedUnitTestContext.__INSTANCE: - raise Error("Must call IntegratedUnitTestContext::startup") - IntegratedUnitTestContext.__INSTANCE.cleanup() - # WSGI shutdown broken :-( - # bug731668 - #IntegratedUnitTestContext.__INSTANCE = None diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py index 501f8c919..6b241f240 100644 --- a/nova/tests/integrated/test_login.py +++ b/nova/tests/integrated/test_login.py @@ -33,12 +33,12 @@ FLAGS.verbose = True class LoginTest(test.TestCase): def setUp(self): super(LoginTest, self).setUp() - context = integrated_helpers.IntegratedUnitTestContext.startup() - self.user = context.test_user + self.context = 
integrated_helpers.IntegratedUnitTestContext() + self.user = self.context.test_user self.api = self.user.openstack_api def tearDown(self): - integrated_helpers.IntegratedUnitTestContext.shutdown() + self.context.cleanup() super(LoginTest, self).tearDown() def test_login(self): diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py deleted file mode 100644 index 4e2ac205e..000000000 --- a/nova/tests/objectstore_unittest.py +++ /dev/null @@ -1,315 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Unittets for S3 objectstore clone. -""" - -import boto -import glob -import hashlib -import os -import shutil -import tempfile - -from boto.s3.connection import S3Connection, OrdinaryCallingFormat -from twisted.internet import reactor, threads, defer -from twisted.web import http, server - -from nova import context -from nova import flags -from nova import objectstore -from nova import test -from nova.auth import manager -from nova.exception import NotEmpty, NotFound -from nova.objectstore import image -from nova.objectstore.handler import S3 - - -FLAGS = flags.FLAGS - -# Create a unique temporary directory. We don't delete after test to -# allow checking the contents after running tests. 
Users and/or tools -# running the tests need to remove the tests directories. -OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-') - -# Create bucket/images path -os.makedirs(os.path.join(OSS_TEMPDIR, 'images')) -os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) - - -class ObjectStoreTestCase(test.TestCase): - """Test objectstore API directly.""" - - def setUp(self): - """Setup users and projects.""" - super(ObjectStoreTestCase, self).setUp() - self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), - images_path=os.path.join(OSS_TEMPDIR, 'images'), - ca_path=os.path.join(os.path.dirname(__file__), 'CA')) - - self.auth_manager = manager.AuthManager() - self.auth_manager.create_user('user1') - self.auth_manager.create_user('user2') - self.auth_manager.create_user('admin_user', admin=True) - self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1']) - self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2']) - self.context = context.RequestContext('user1', 'proj1') - - def tearDown(self): - """Tear down users and projects.""" - self.auth_manager.delete_project('proj1') - self.auth_manager.delete_project('proj2') - self.auth_manager.delete_user('user1') - self.auth_manager.delete_user('user2') - self.auth_manager.delete_user('admin_user') - super(ObjectStoreTestCase, self).tearDown() - - def test_buckets(self): - """Test the bucket API.""" - objectstore.bucket.Bucket.create('new_bucket', self.context) - bucket = objectstore.bucket.Bucket('new_bucket') - - # creator is authorized to use bucket - self.assert_(bucket.is_authorized(self.context)) - - # another user is not authorized - context2 = context.RequestContext('user2', 'proj2') - self.assertFalse(bucket.is_authorized(context2)) - - # admin is authorized to use bucket - admin_context = context.RequestContext('admin_user', None) - self.assertTrue(bucket.is_authorized(admin_context)) - - # new buckets are empty - self.assertTrue(bucket.list_keys()['Contents'] == []) - - # storing keys 
works - bucket['foo'] = "bar" - - self.assertEquals(len(bucket.list_keys()['Contents']), 1) - - self.assertEquals(bucket['foo'].read(), 'bar') - - # md5 of key works - self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest()) - - # deleting non-empty bucket should throw a NotEmpty exception - self.assertRaises(NotEmpty, bucket.delete) - - # deleting key - del bucket['foo'] - - # deleting empty bucket - bucket.delete() - - # accessing deleted bucket throws exception - self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket') - - def test_images(self): - self.do_test_images('1mb.manifest.xml', True, - 'image_bucket1', 'i-testing1') - - def test_images_no_kernel_or_ramdisk(self): - self.do_test_images('1mb.no_kernel_or_ramdisk.manifest.xml', - False, 'image_bucket2', 'i-testing2') - - def do_test_images(self, manifest_file, expect_kernel_and_ramdisk, - image_bucket, image_name): - "Test the image API." - - # create a bucket for our bundle - objectstore.bucket.Bucket.create(image_bucket, self.context) - bucket = objectstore.bucket.Bucket(image_bucket) - - # upload an image manifest/parts - bundle_path = os.path.join(os.path.dirname(__file__), 'bundle') - for path in glob.glob(bundle_path + '/*'): - bucket[os.path.basename(path)] = open(path, 'rb').read() - - # register an image - image.Image.register_aws_image(image_name, - '%s/%s' % (image_bucket, manifest_file), - self.context) - - # verify image - my_img = image.Image(image_name) - result_image_file = os.path.join(my_img.path, 'image') - self.assertEqual(os.stat(result_image_file).st_size, 1048576) - - sha = hashlib.sha1(open(result_image_file).read()).hexdigest() - self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3') - - if expect_kernel_and_ramdisk: - # Verify the default kernel and ramdisk are set - self.assertEqual(my_img.metadata['kernelId'], 'aki-test') - self.assertEqual(my_img.metadata['ramdiskId'], 'ari-test') - else: - # Verify that the default kernel and ramdisk (the one 
from FLAGS) - # doesn't get embedded in the metadata - self.assertFalse('kernelId' in my_img.metadata) - self.assertFalse('ramdiskId' in my_img.metadata) - - # verify image permissions - context2 = context.RequestContext('user2', 'proj2') - self.assertFalse(my_img.is_authorized(context2)) - - # change user-editable fields - my_img.update_user_editable_fields({'display_name': 'my cool image'}) - self.assertEqual('my cool image', my_img.metadata['displayName']) - my_img.update_user_editable_fields({'display_name': ''}) - self.assert_(not my_img.metadata['displayName']) - - -class TestHTTPChannel(http.HTTPChannel): - """Dummy site required for twisted.web""" - - def checkPersistence(self, _, __): # pylint: disable=C0103 - """Otherwise we end up with an unclean reactor.""" - return False - - -class TestSite(server.Site): - """Dummy site required for twisted.web""" - protocol = TestHTTPChannel - - -class S3APITestCase(test.TestCase): - """Test objectstore through S3 API.""" - - def setUp(self): - """Setup users, projects, and start a test server.""" - super(S3APITestCase, self).setUp() - - FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' - FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets') - - self.auth_manager = manager.AuthManager() - self.admin_user = self.auth_manager.create_user('admin', admin=True) - self.admin_project = self.auth_manager.create_project('admin', - self.admin_user) - - shutil.rmtree(FLAGS.buckets_path) - os.mkdir(FLAGS.buckets_path) - - root = S3() - self.site = TestSite(root) - # pylint: disable=E1101 - self.listening_port = reactor.listenTCP(0, self.site, - interface='127.0.0.1') - # pylint: enable=E1101 - self.tcp_port = self.listening_port.getHost().port - - if not boto.config.has_section('Boto'): - boto.config.add_section('Boto') - boto.config.set('Boto', 'num_retries', '0') - self.conn = S3Connection(aws_access_key_id=self.admin_user.access, - aws_secret_access_key=self.admin_user.secret, - host='127.0.0.1', - 
port=self.tcp_port, - is_secure=False, - calling_format=OrdinaryCallingFormat()) - - def get_http_connection(host, is_secure): - """Get a new S3 connection, don't attempt to reuse connections.""" - return self.conn.new_http_connection(host, is_secure) - - self.conn.get_http_connection = get_http_connection - - def _ensure_no_buckets(self, buckets): # pylint: disable=C0111 - self.assertEquals(len(buckets), 0, "Bucket list was not empty") - return True - - def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 - self.assertEquals(len(buckets), 1, - "Bucket list didn't have exactly one element in it") - self.assertEquals(buckets[0].name, name, "Wrong name") - return True - - def test_000_list_buckets(self): - """Make sure we are starting with no buckets.""" - deferred = threads.deferToThread(self.conn.get_all_buckets) - deferred.addCallback(self._ensure_no_buckets) - return deferred - - def test_001_create_and_delete_bucket(self): - """Test bucket creation and deletion.""" - bucket_name = 'testbucket' - - deferred = threads.deferToThread(self.conn.create_bucket, bucket_name) - deferred.addCallback(lambda _: - threads.deferToThread(self.conn.get_all_buckets)) - - deferred.addCallback(self._ensure_one_bucket, bucket_name) - - deferred.addCallback(lambda _: - threads.deferToThread(self.conn.delete_bucket, - bucket_name)) - deferred.addCallback(lambda _: - threads.deferToThread(self.conn.get_all_buckets)) - deferred.addCallback(self._ensure_no_buckets) - return deferred - - def test_002_create_bucket_and_key_and_delete_key_again(self): - """Test key operations on buckets.""" - bucket_name = 'testbucket' - key_name = 'somekey' - key_contents = 'somekey' - - deferred = threads.deferToThread(self.conn.create_bucket, bucket_name) - deferred.addCallback(lambda b: - threads.deferToThread(b.new_key, key_name)) - deferred.addCallback(lambda k: - threads.deferToThread(k.set_contents_from_string, - key_contents)) - - def ensure_key_contents(bucket_name, key_name, 
contents): - """Verify contents for a key in the given bucket.""" - bucket = self.conn.get_bucket(bucket_name) - key = bucket.get_key(key_name) - self.assertEquals(key.get_contents_as_string(), contents, - "Bad contents") - - deferred.addCallback(lambda _: - threads.deferToThread(ensure_key_contents, - bucket_name, key_name, - key_contents)) - - def delete_key(bucket_name, key_name): - """Delete a key for the given bucket.""" - bucket = self.conn.get_bucket(bucket_name) - key = bucket.get_key(key_name) - key.delete() - - deferred.addCallback(lambda _: - threads.deferToThread(delete_key, bucket_name, - key_name)) - deferred.addCallback(lambda _: - threads.deferToThread(self.conn.get_bucket, - bucket_name)) - deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys)) - deferred.addCallback(self._ensure_no_buckets) - return deferred - - def tearDown(self): - """Tear down auth and test server.""" - self.auth_manager.delete_user('admin') - self.auth_manager.delete_project('admin') - stop_listening = defer.maybeDeferred(self.listening_port.stopListening) - super(S3APITestCase, self).tearDown() - return defer.DeferredList([stop_listening]) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index cf8ee7eff..00803d0ad 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -35,31 +35,22 @@ from nova import log as logging from nova import rpc from nova import service from nova import test +from nova import utils from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils from nova.image import local -from nova.objectstore import image FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.cloud') -# Temp dirs for working with image attributes through the cloud controller -# (stole this from objectstore_unittest.py) -OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-') -IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images') -os.makedirs(IMAGES_PATH) - -# TODO(termie): 
these tests are rather fragile, they should at the lest be -# wiping database state after each run class CloudTestCase(test.TestCase): def setUp(self): super(CloudTestCase, self).setUp() - self.flags(connection_type='fake', - images_path=IMAGES_PATH) + self.flags(connection_type='fake') self.conn = rpc.Connection.instance() @@ -70,6 +61,7 @@ class CloudTestCase(test.TestCase): self.compute = self.start_service('compute') self.scheduter = self.start_service('scheduler') self.network = self.start_service('network') + self.image_service = utils.import_object(FLAGS.image_service) self.manager = manager.AuthManager() self.user = self.manager.create_user('admin', 'admin', 'admin', True) @@ -318,41 +310,6 @@ class CloudTestCase(test.TestCase): LOG.debug(_("Terminating instance %s"), instance_id) rv = self.compute.terminate_instance(instance_id) - @staticmethod - def _fake_set_image_description(ctxt, image_id, description): - from nova.objectstore import handler - - class req: - pass - - request = req() - request.context = ctxt - request.args = {'image_id': [image_id], - 'description': [description]} - - resource = handler.ImagesResource() - resource.render_POST(request) - - def test_user_editable_image_endpoint(self): - pathdir = os.path.join(FLAGS.images_path, 'ami-testing') - os.mkdir(pathdir) - info = {'isPublic': False} - with open(os.path.join(pathdir, 'info.json'), 'w') as f: - json.dump(info, f) - img = image.Image('ami-testing') - # self.cloud.set_image_description(self.context, 'ami-testing', - # 'Foo Img') - # NOTE(vish): Above won't work unless we start objectstore or create - # a fake version of api/ec2/images.py conn that can - # call methods directly instead of going through boto. 
- # for now, just cheat and call the method directly - self._fake_set_image_description(self.context, 'ami-testing', - 'Foo Img') - self.assertEqual('Foo Img', img.metadata['description']) - self._fake_set_image_description(self.context, 'ami-testing', '') - self.assertEqual('', img.metadata['description']) - shutil.rmtree(pathdir) - def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) ec2_id = ec2utils.id_to_ec2_id(inst['id']) diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 80e4d2e1f..588a24b35 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -25,12 +25,18 @@ import webob from nova import compute from nova import context from nova import exception +from nova import network from nova import test +from nova import volume from nova import utils from nova.api import direct from nova.tests import test_cloud +class ArbitraryObject(object): + pass + + class FakeService(object): def echo(self, context, data): return {'data': data} @@ -39,6 +45,9 @@ class FakeService(object): return {'user': context.user_id, 'project': context.project_id} + def invalid_return(self, context): + return ArbitraryObject() + class DirectTestCase(test.TestCase): def setUp(self): @@ -84,6 +93,12 @@ class DirectTestCase(test.TestCase): resp_parsed = json.loads(resp.body) self.assertEqual(resp_parsed['data'], 'foo') + def test_invalid(self): + req = webob.Request.blank('/fake/invalid_return') + req.environ['openstack.context'] = self.context + req.method = 'POST' + self.assertRaises(exception.Error, req.get_response, self.router) + def test_proxy(self): proxy = direct.Proxy(self.router) rv = proxy.fake.echo(self.context, data='baz') @@ -93,12 +108,20 @@ class DirectTestCase(test.TestCase): class DirectCloudTestCase(test_cloud.CloudTestCase): def setUp(self): super(DirectCloudTestCase, self).setUp() - compute_handle = compute.API(network_api=self.cloud.network_api, - volume_api=self.cloud.volume_api) + 
compute_handle = compute.API(image_service=self.cloud.image_service) + volume_handle = volume.API() + network_handle = network.API() direct.register_service('compute', compute_handle) + direct.register_service('volume', volume_handle) + direct.register_service('network', network_handle) + self.router = direct.JsonParamsMiddleware(direct.Router()) proxy = direct.Proxy(self.router) self.cloud.compute_api = proxy.compute + self.cloud.volume_api = proxy.volume + self.cloud.network_api = proxy.network + compute_handle.volume_api = proxy.volume + compute_handle.network_api = proxy.network def tearDown(self): super(DirectCloudTestCase, self).tearDown() diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py new file mode 100644 index 000000000..c78772f27 --- /dev/null +++ b/nova/tests/test_objectstore.py @@ -0,0 +1,148 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Unittets for S3 objectstore clone. 
+""" + +import boto +import glob +import hashlib +import os +import shutil +import tempfile + +from boto import exception as boto_exception +from boto.s3 import connection as s3 + +from nova import context +from nova import exception +from nova import flags +from nova import wsgi +from nova import test +from nova.auth import manager +from nova.objectstore import s3server + + +FLAGS = flags.FLAGS + +# Create a unique temporary directory. We don't delete after test to +# allow checking the contents after running tests. Users and/or tools +# running the tests need to remove the tests directories. +OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-') + +# Create bucket/images path +os.makedirs(os.path.join(OSS_TEMPDIR, 'images')) +os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets')) + + +class S3APITestCase(test.TestCase): + """Test objectstore through S3 API.""" + + def setUp(self): + """Setup users, projects, and start a test server.""" + super(S3APITestCase, self).setUp() + self.flags(auth_driver='nova.auth.ldapdriver.FakeLdapDriver', + buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'), + s3_host='127.0.0.1') + + self.auth_manager = manager.AuthManager() + self.admin_user = self.auth_manager.create_user('admin', admin=True) + self.admin_project = self.auth_manager.create_project('admin', + self.admin_user) + + shutil.rmtree(FLAGS.buckets_path) + os.mkdir(FLAGS.buckets_path) + + router = s3server.S3Application(FLAGS.buckets_path) + server = wsgi.Server() + server.start(router, FLAGS.s3_port, host=FLAGS.s3_host) + + if not boto.config.has_section('Boto'): + boto.config.add_section('Boto') + boto.config.set('Boto', 'num_retries', '0') + conn = s3.S3Connection(aws_access_key_id=self.admin_user.access, + aws_secret_access_key=self.admin_user.secret, + host=FLAGS.s3_host, + port=FLAGS.s3_port, + is_secure=False, + calling_format=s3.OrdinaryCallingFormat()) + self.conn = conn + + def get_http_connection(host, is_secure): + """Get a new S3 connection, don't attempt to reuse 
connections.""" + return self.conn.new_http_connection(host, is_secure) + + self.conn.get_http_connection = get_http_connection + + def _ensure_no_buckets(self, buckets): # pylint: disable=C0111 + self.assertEquals(len(buckets), 0, "Bucket list was not empty") + return True + + def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111 + self.assertEquals(len(buckets), 1, + "Bucket list didn't have exactly one element in it") + self.assertEquals(buckets[0].name, name, "Wrong name") + return True + + def test_000_list_buckets(self): + """Make sure we are starting with no buckets.""" + self._ensure_no_buckets(self.conn.get_all_buckets()) + + def test_001_create_and_delete_bucket(self): + """Test bucket creation and deletion.""" + bucket_name = 'testbucket' + + self.conn.create_bucket(bucket_name) + self._ensure_one_bucket(self.conn.get_all_buckets(), bucket_name) + self.conn.delete_bucket(bucket_name) + self._ensure_no_buckets(self.conn.get_all_buckets()) + + def test_002_create_bucket_and_key_and_delete_key_again(self): + """Test key operations on buckets.""" + bucket_name = 'testbucket' + key_name = 'somekey' + key_contents = 'somekey' + + b = self.conn.create_bucket(bucket_name) + k = b.new_key(key_name) + k.set_contents_from_string(key_contents) + + bucket = self.conn.get_bucket(bucket_name) + + # make sure the contents are correct + key = bucket.get_key(key_name) + self.assertEquals(key.get_contents_as_string(), key_contents, + "Bad contents") + + # delete the key + key.delete() + + self._ensure_no_buckets(bucket.get_all_keys()) + + def test_unknown_bucket(self): + bucket_name = 'falalala' + self.assertRaises(boto_exception.S3ResponseError, + self.conn.get_bucket, + bucket_name) + + def tearDown(self): + """Tear down auth and test server.""" + self.auth_manager.delete_user('admin') + self.auth_manager.delete_project('admin') + super(S3APITestCase, self).tearDown() diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 4820e04fb..44d7c91eb 
100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -36,7 +36,7 @@ class RpcTestCase(test.TestCase): super(RpcTestCase, self).setUp() self.conn = rpc.Connection.instance(True) self.receiver = TestReceiver() - self.consumer = rpc.AdapterConsumer(connection=self.conn, + self.consumer = rpc.TopicAdapterConsumer(connection=self.conn, topic='test', proxy=self.receiver) self.consumer.attach_to_eventlet() @@ -97,7 +97,7 @@ class RpcTestCase(test.TestCase): nested = Nested() conn = rpc.Connection.instance(True) - consumer = rpc.AdapterConsumer(connection=conn, + consumer = rpc.TopicAdapterConsumer(connection=conn, topic='nested', proxy=nested) consumer.attach_to_eventlet() diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 244e43bd9..6df74dd61 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -21,6 +21,9 @@ Tests For Scheduler import datetime import mox +import novaclient.exceptions +import stubout +import webob from mox import IgnoreArg from nova import context @@ -32,6 +35,7 @@ from nova import test from nova import rpc from nova import utils from nova.auth import manager as auth_manager +from nova.scheduler import api from nova.scheduler import manager from nova.scheduler import driver from nova.compute import power_state @@ -937,3 +941,160 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) + + +class FakeZone(object): + def __init__(self, api_url, username, password): + self.api_url = api_url + self.username = username + self.password = password + + +def zone_get_all(context): + return [ + FakeZone('http://example.com', 'bob', 'xxx'), + ] + + +class FakeRerouteCompute(api.reroute_compute): + def _call_child_zones(self, zones, function): + return [] + + def get_collection_context_and_id(self, args, kwargs): + return ("servers", None, 1) + + def 
unmarshall_result(self, zone_responses): + return dict(magic="found me") + + +def go_boom(self, context, instance): + raise exception.InstanceNotFound("boom message", instance) + + +def found_instance(self, context, instance): + return dict(name='myserver') + + +class FakeResource(object): + def __init__(self, attribute_dict): + for k, v in attribute_dict.iteritems(): + setattr(self, k, v) + + def pause(self): + pass + + +class ZoneRedirectTest(test.TestCase): + def setUp(self): + super(ZoneRedirectTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + + self.stubs.Set(db, 'zone_get_all', zone_get_all) + + self.enable_zone_routing = FLAGS.enable_zone_routing + FLAGS.enable_zone_routing = True + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.enable_zone_routing = self.enable_zone_routing + super(ZoneRedirectTest, self).tearDown() + + def test_trap_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(found_instance)(None, None, 1) + except api.RedirectResult, e: + self.fail(_("Successful database hit should succeed")) + + def test_trap_not_found_locally(self): + decorator = FakeRerouteCompute("foo") + try: + result = decorator(go_boom)(None, None, 1) + self.assertFail(_("Should have rerouted.")) + except api.RedirectResult, e: + self.assertEquals(e.results['magic'], 'found me') + + def test_routing_flags(self): + FLAGS.enable_zone_routing = False + decorator = FakeRerouteCompute("foo") + try: + result = decorator(go_boom)(None, None, 1) + self.assertFail(_("Should have thrown exception.")) + except exception.InstanceNotFound, e: + self.assertEquals(e.message, 'boom message') + + def test_get_collection_context_and_id(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.get_collection_context_and_id( + (None, 10, 20), {}), ("servers", 10, 20)) + self.assertEquals(decorator.get_collection_context_and_id( + (None, 11,), dict(instance_id=21)), ("servers", 11, 21)) + 
self.assertEquals(decorator.get_collection_context_and_id( + (None,), dict(context=12, instance_id=22)), ("servers", 12, 22)) + + def test_unmarshal_single_server(self): + decorator = api.reroute_compute("foo") + self.assertEquals(decorator.unmarshall_result([]), {}) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, b=2)), ]), + dict(server=dict(a=1, b=2))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, _b=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(a=1, manager=2)), ]), + dict(server=dict(a=1,))) + self.assertEquals(decorator.unmarshall_result( + [FakeResource(dict(_a=1, manager=2)), ]), + dict(server={})) + + +class FakeServerCollection(object): + def get(self, instance_id): + return FakeResource(dict(a=10, b=20)) + + def find(self, name): + return FakeResource(dict(a=11, b=22)) + + +class FakeEmptyServerCollection(object): + def get(self, f): + raise novaclient.NotFound(1) + + def find(self, name): + raise novaclient.NotFound(2) + + +class FakeNovaClient(object): + def __init__(self, collection): + self.servers = collection + + +class DynamicNovaClientTest(test.TestCase): + def test_issue_novaclient_command_found(self): + zone = FakeZone('http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "get", 100).a, 10) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "find", "name").b, 22) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeServerCollection()), + zone, "servers", "pause", 100), None) + + def test_issue_novaclient_command_not_found(self): + zone = FakeZone('http://example.com', 'bob', 'xxx') + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "get", 100), None) + + 
self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "find", "name"), None) + + self.assertEquals(api._issue_novaclient_command( + FakeNovaClient(FakeEmptyServerCollection()), + zone, "servers", "any", "name"), None) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 393f9d20b..d48de2057 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -109,20 +109,29 @@ class ServiceTestCase(test.TestCase): app = service.Service.create(host=host, binary=binary) self.mox.StubOutWithMock(rpc, - 'AdapterConsumer', + 'TopicAdapterConsumer', use_mock_anything=True) - rpc.AdapterConsumer(connection=mox.IgnoreArg(), + self.mox.StubOutWithMock(rpc, + 'FanoutAdapterConsumer', + use_mock_anything=True) + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), topic=topic, proxy=mox.IsA(service.Service)).AndReturn( - rpc.AdapterConsumer) + rpc.TopicAdapterConsumer) - rpc.AdapterConsumer(connection=mox.IgnoreArg(), + rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(), topic='%s.%s' % (topic, host), proxy=mox.IsA(service.Service)).AndReturn( - rpc.AdapterConsumer) + rpc.TopicAdapterConsumer) + + rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(), + topic=topic, + proxy=mox.IsA(service.Service)).AndReturn( + rpc.FanoutAdapterConsumer) - rpc.AdapterConsumer.attach_to_eventlet() - rpc.AdapterConsumer.attach_to_eventlet() + rpc.TopicAdapterConsumer.attach_to_eventlet() + rpc.TopicAdapterConsumer.attach_to_eventlet() + rpc.FanoutAdapterConsumer.attach_to_eventlet() service_create = {'host': host, 'binary': binary, @@ -279,6 +288,7 @@ class ServiceTestCase(test.TestCase): self.mox.StubOutWithMock(service.rpc.Connection, 'instance') service.rpc.Connection.instance(new=mox.IgnoreArg()) service.rpc.Connection.instance(new=mox.IgnoreArg()) + service.rpc.Connection.instance(new=mox.IgnoreArg()) self.mox.StubOutWithMock(serv.manager.driver, 'update_available_resource') 
serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index e237674e6..35c838065 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -34,7 +34,7 @@ class IsolationTestCase(test.TestCase): def test_rpc_consumer_isolation(self): connection = rpc.Connection.instance(new=True) - consumer = rpc.TopicConsumer(connection, topic='compute') + consumer = rpc.TopicAdapterConsumer(connection, topic='compute') consumer.register_callback( lambda x, y: self.fail('I should never be called')) consumer.attach_to_eventlet() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index fb0ba53b1..3a03159ff 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -796,7 +796,8 @@ class NWFilterTestCase(test.TestCase): instance_ref = db.instance_create(self.context, {'user_id': 'fake', - 'project_id': 'fake'}) + 'project_id': 'fake', + 'mac_address': '00:A0:C9:14:C8:29'}) inst_id = instance_ref['id'] ip = '10.11.12.13' @@ -813,7 +814,8 @@ class NWFilterTestCase(test.TestCase): 'instance_id': instance_ref['id']}) def _ensure_all_called(): - instance_filter = 'nova-instance-%s' % instance_ref['name'] + instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], + '00A0C914C829') secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing', 'no-ip-spoofing', diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py new file mode 100644 index 000000000..22b66010a --- /dev/null +++ b/nova/tests/test_vmwareapi.py @@ -0,0 +1,252 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMWareAPI.
+"""
+
+import stubout
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.compute import power_state
+from nova.tests.glance import stubs as glance_stubs
+from nova.tests.vmwareapi import db_fakes
+from nova.tests.vmwareapi import stubs
+from nova.virt import vmwareapi_conn
+from nova.virt.vmwareapi import fake as vmwareapi_fake
+
+
+FLAGS = flags.FLAGS
+
+
+class VMWareAPIVMTestCase(test.TestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ def setUp(self):
+ super(VMWareAPIVMTestCase, self).setUp()
+ self.flags(vmwareapi_host_ip='test_url',
+ vmwareapi_host_username='test_username',
+ vmwareapi_host_password='test_pass')
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.stubs = stubout.StubOutForTesting()
+ vmwareapi_fake.reset()
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ stubs.set_stubs(self.stubs)
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+ self.conn = vmwareapi_conn.get_connection(False)
+
+ def _create_instance_in_the_db(self):
+ values = {'name': 1,
+ 'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
+ self.instance = db.instance_create(values)
+
+ def _create_vm(self):
+ """Create and spawn the VM."""
+ self._create_instance_in_the_db()
+ self.type_data = db.instance_type_get_by_name(None, 'm1.large')
+ self.conn.spawn(self.instance)
+ self._check_vm_record()
+
+ def _check_vm_record(self):
+ """
+ Check if the spawned VM's properties correspond to the instance in
+ the db.
+ """
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ # Get Nova record for VM
+ vm_info = self.conn.get_info(1)
+
+ # Get record for VM
+ vms = vmwareapi_fake._get_objects("VirtualMachine")
+ vm = vms[0]
+
+ # Check that m1.large above turned into the right thing.
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ vcpus = self.type_data['vcpus']
+ self.assertEquals(vm_info['max_mem'], mem_kib)
+ self.assertEquals(vm_info['mem'], mem_kib)
+ self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
+ self.assertEquals(vm.get("summary.config.memorySizeMB"),
+ self.type_data['memory_mb'])
+
+ # Check that the VM is running according to Nova
+ self.assertEquals(vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to vSphere API.
+ self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
+
+ def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
+ """
+ Check if the get_info returned values correspond to the instance
+ object in the db.
+ """
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ self.assertEquals(info["state"], pwr_state)
+ self.assertEquals(info["max_mem"], mem_kib)
+ self.assertEquals(info["mem"], mem_kib)
+ self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ def test_list_instances_1(self):
+ self._create_vm()
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ def test_spawn(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.snapshot(self.instance, "Test-Snapshot")
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.snapshot, self.instance,
+ "Test-Snapshot")
+
+ def test_reboot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.reboot(self.instance)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ def test_reboot_not_poweredon(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ def test_suspend(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+
+ def test_suspend_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.suspend, self.instance,
+ self.dummy_callback_handler)
+
+ def test_resume(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.conn.resume(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_resume_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ def test_resume_not_suspended(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ def test_get_info(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+ self.conn.destroy(self.instance)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ def test_destroy_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertEquals(self.conn.destroy(self.instance), None)
+
+ def test_pause(self):
+ pass
+
+ def test_unpause(self):
+ pass
+
+ def test_diagnostics(self):
+ pass
+
+ def test_get_console_output(self):
+ pass
+
+ def test_get_ajax_console(self):
+ pass
+
+ def dummy_callback_handler(self, ret):
+ """
+ Dummy callback function passed to suspend, resume, and similar calls.
+ """
+ pass
+
+ def tearDown(self):
+ super(VMWareAPIVMTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ self.stubs.UnsetAll()
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 5d68ca2ae..d71b75f3f 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -356,8 +356,8 @@ class ISCSITestCase(DriverTestCase): tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0]) self.mox.StubOutWithMock(self.volume.driver, '_execute') self.volume.driver._execute("sudo", "ietadm", "--op", "show", - "--tid=%(tid)d" % locals() - ).AndRaise(exception.ProcessExecutionError()) + "--tid=%(tid)d" % locals()).AndRaise( + exception.ProcessExecutionError()) self.mox.ReplayAll() self.assertRaises(exception.ProcessExecutionError, diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 66a973a78..e54ffe712 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -186,6 +186,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_stream_disk(self.stubs) stubs.stubout_is_vdi_pv(self.stubs) self.stubs.Set(VMOps, 'reset_network', reset_network) + stubs.stub_out_vm_methods(self.stubs) glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) self.conn = xenapi_conn.get_connection(False) @@ -369,6 +370,17 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(vif_rec['qos_algorithm_params']['kbps'], str(4 * 1024)) + def test_rescue(self): + instance = self._create_instance() + conn = xenapi_conn.get_connection(False) + conn.rescue(instance, None) + + def test_unrescue(self): + instance = self._create_instance() + conn = xenapi_conn.get_connection(False) + # Ensure that it will not unrescue a non-rescued instance. 
+ self.assertRaises(Exception, conn.unrescue, instance, None) + def tearDown(self): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py index 5a52a0506..688dc704d 100644 --- a/nova/tests/test_zones.py +++ b/nova/tests/test_zones.py @@ -76,6 +76,40 @@ class ZoneManagerTestCase(test.TestCase): self.assertEquals(len(zm.zone_states), 1) self.assertEquals(zm.zone_states[1].username, 'user1') + def test_service_capabilities(self): + zm = zone_manager.ZoneManager() + caps = zm.get_zone_capabilities(self, None) + self.assertEquals(caps, {}) + + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + caps = zm.get_zone_capabilities(self, None) + self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2))) + + zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3)) + caps = zm.get_zone_capabilities(self, None) + self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3))) + + zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30)) + caps = zm.get_zone_capabilities(self, None) + self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30))) + + zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99)) + caps = zm.get_zone_capabilities(self, None) + self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30), + svc10_a=(99, 99), svc10_b=(99, 99))) + + zm.update_service_capabilities("svc1", "host3", dict(c=5)) + caps = zm.get_zone_capabilities(self, None) + self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30), + svc1_c=(5, 5), svc10_a=(99, 99), + svc10_b=(99, 99))) + + caps = zm.get_zone_capabilities(self, 'svc1') + self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30), + svc1_c=(5, 5))) + caps = zm.get_zone_capabilities(self, 'svc10') + self.assertEquals(caps, dict(svc10_a=(99, 99), svc10_b=(99, 99))) + def test_refresh_from_db_replace_existing(self): zm = zone_manager.ZoneManager() zone_state = zone_manager.ZoneState() diff --git 
a/nova/tests/vmwareapi/__init__.py b/nova/tests/vmwareapi/__init__.py new file mode 100644 index 000000000..478ee742b --- /dev/null +++ b/nova/tests/vmwareapi/__init__.py @@ -0,0 +1,21 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`vmwareapi` -- Stubs for VMware API
+========================================
+"""
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py new file mode 100644 index 000000000..0addd5573 --- /dev/null +++ b/nova/tests/vmwareapi/db_fakes.py @@ -0,0 +1,109 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts, mocks and fixtures for the test suite
+"""
+
+import time
+
+from nova import db
+from nova import utils
+
+
+def stub_out_db_instance_api(stubs):
+ """Stubs out the db API for creating Instances."""
+
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium':
+ dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
+ class FakeModel(object):
+ """Stubs out for model."""
+
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def fake_instance_create(values):
+ """Stubs out the db.instance_create method."""
+
+ type_data = INSTANCE_TYPES[values['instance_type']]
+
+ base_options = {
+ 'name': values['name'],
+ 'id': values['id'],
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_id': values['image_id'],
+ 'kernel_id': values['kernel_id'],
+ 'ramdisk_id': values['ramdisk_id'],
+ 'state_description': 'scheduling',
+ 'user_id': values['user_id'],
+ 'project_id': values['project_id'],
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': values['instance_type'],
+ 'memory_mb': type_data['memory_mb'],
+ 'mac_address': values['mac_address'],
+ 'vcpus': type_data['vcpus'],
+ 'local_gb': type_data['local_gb'],
+ }
+ return FakeModel(base_options)
+
+ def fake_network_get_by_instance(context, instance_id):
+ """Stubs out the db.network_get_by_instance method."""
+
+ fields = {
+ 'bridge': 'vmnet0',
+ 'netmask': '255.255.255.0',
+ 'gateway': '10.10.10.1',
+ 'vlan': 100}
+ return FakeModel(fields)
+
+ def fake_instance_action_create(context, action):
+ """Stubs out the db.instance_action_create method."""
+ pass
+
+ def fake_instance_get_fixed_address(context, instance_id):
+ """Stubs out the db.instance_get_fixed_address method."""
+ return '10.10.10.10'
+
+ def fake_instance_type_get_all(context, inactive=0):
+ return INSTANCE_TYPES
+
+ def fake_instance_type_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
+
+ stubs.Set(db, 'instance_create', fake_instance_create)
+ stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
+ stubs.Set(db, 'instance_action_create', fake_instance_action_create)
+ stubs.Set(db, 'instance_get_fixed_address',
+ fake_instance_get_fixed_address)
+ stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
+ stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py new file mode 100644 index 000000000..a648efb16 --- /dev/null +++ b/nova/tests/vmwareapi/stubs.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts for the test suite
+"""
+
+from nova.virt import vmwareapi_conn
+from nova.virt.vmwareapi import fake
+from nova.virt.vmwareapi import vmware_images
+
+
+def fake_get_vim_object(arg):
+ """Stubs out the VMWareAPISession's get_vim_object method."""
+ return fake.FakeVim()
+
+
+def fake_is_vim_object(arg, module):
+ """Stubs out the VMWareAPISession's is_vim_object method."""
+ return isinstance(module, fake.FakeVim)
+
+
+def set_stubs(stubs):
+ """Set the stubs."""
+ stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
+ stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
+ fake.fake_get_vmdk_size_and_properties)
+ stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
+ stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
+ fake_get_vim_object)
+ stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object",
+ fake_is_vim_object)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 7f9706a3d..7c33710c0 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -185,6 +185,25 @@ class FakeSessionForVMTests(fake.SessionBase): pass +def stub_out_vm_methods(stubs): + def fake_shutdown(self, inst, vm, method="clean"): + pass + + def fake_acquire_bootlock(self, vm): + pass + + def fake_release_bootlock(self, vm): + pass + + def fake_spawn_rescue(self, inst): + pass + + stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown) + stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock) + stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock) + stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue) + + class FakeSessionForVolumeTests(fake.SessionBase): """ Stubs out a XenAPISession for Volume tests """ def __init__(self, uri): diff --git a/nova/utils.py b/nova/utils.py index e4d8a70eb..3f6f9fc8a 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -171,10 +171,6 @@ def execute(*cmd, **kwargs): stdout=stdout, stderr=stderr, cmd=' '.join(cmd)) - # NOTE(termie): this appears to be necessary to let the subprocess - # call clean something up in between calls, without - # it two execute calls in a row hangs the second one - greenthread.sleep(0) return result except ProcessExecutionError: if not attempts: @@ -183,6 +179,11 @@ def execute(*cmd, **kwargs): LOG.debug(_("%r failed. 
Retrying."), cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) + finally: + # NOTE(termie): this appears to be necessary to let the subprocess + # call clean something up in between calls, without + # it two execute calls in a row hangs the second one + greenthread.sleep(0) def ssh_execute(ssh, cmd, process_input=None, @@ -310,11 +311,15 @@ def get_my_linklocal(interface): def to_global_ipv6(prefix, mac): - mac64 = netaddr.EUI(mac).eui64().words - int_addr = int(''.join(['%02x' % i for i in mac64]), 16) - mac64_addr = netaddr.IPAddress(int_addr) - maskIP = netaddr.IPNetwork(prefix).ip - return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).format() + try: + mac64 = netaddr.EUI(mac).eui64().words + int_addr = int(''.join(['%02x' % i for i in mac64]), 16) + mac64_addr = netaddr.IPAddress(int_addr) + maskIP = netaddr.IPNetwork(prefix).ip + return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\ + format() + except TypeError: + raise TypeError(_("Bad mac for to_global_ipv6: %s") % mac) def to_mac(ipv6_address): @@ -336,11 +341,8 @@ utcnow.override_time = None def is_older_than(before, seconds): - """Return True if before is older than 'seconds'""" - if utcnow() - before > datetime.timedelta(seconds=seconds): - return True - else: - return False + """Return True if before is older than seconds""" + return utcnow() - before > datetime.timedelta(seconds=seconds) def utcnow_ts(): @@ -663,6 +665,48 @@ def get_from_path(items, path): return get_from_path(results, remainder) +def flatten_dict(dict_, flattened=None): + """Recursively flatten a nested dictionary""" + flattened = flattened or {} + for key, value in dict_.iteritems(): + if hasattr(value, 'iteritems'): + flatten_dict(value, flattened) + else: + flattened[key] = value + return flattened + + +def partition_dict(dict_, keys): + """Return two dicts, one containing only `keys` the other containing + everything but `keys` + """ + intersection = {} + difference = {} + for 
key, value in dict_.iteritems(): + if key in keys: + intersection[key] = value + else: + difference[key] = value + return intersection, difference + + +def map_dict_keys(dict_, key_map): + """Return a dictionary in which the dictionaries keys are mapped to + new keys. + """ + mapped = {} + for key, value in dict_.iteritems(): + mapped_key = key_map[key] if key in key_map else key + mapped[mapped_key] = value + return mapped + + +def subset_dict(dict_, keys): + """Return a dict that only contains a subset of keys""" + subset = partition_dict(dict_, keys)[0] + return subset + + def check_isinstance(obj, cls): """Checks that obj is of type cls, and lets PyLint infer types""" if isinstance(obj, cls): diff --git a/nova/virt/connection.py b/nova/virt/connection.py index af7001715..99a8849f1 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -26,9 +26,10 @@ from nova import log as logging from nova import utils from nova.virt import driver from nova.virt import fake +from nova.virt import hyperv from nova.virt import libvirt_conn +from nova.virt import vmwareapi_conn from nova.virt import xenapi_conn -from nova.virt import hyperv LOG = logging.getLogger("nova.virt.connection") @@ -68,6 +69,8 @@ def get_connection(read_only=False): conn = xenapi_conn.get_connection(read_only) elif t == 'hyperv': conn = hyperv.get_connection(read_only) + elif t == 'vmwareapi': + conn = vmwareapi_conn.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 0e3a4aa3b..f9cf1b8aa 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -61,7 +61,7 @@ class ComputeDriver(object): """Return a list of InstanceInfo for all registered VMs""" raise NotImplementedError() - def spawn(self, instance): + def spawn(self, instance, network_info=None): """Launch a VM for the specified instance""" raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 
5b0fe1877..7018f8c1b 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -344,7 +344,7 @@ class FakeConnection(driver.ComputeDriver): Note that this function takes an instance ID, not a compute.service.Instance, so that it can be called by compute.monitor. """ - return [0L, 0L, 0L, 0L, null] + return [0L, 0L, 0L, 0L, None] def interface_stats(self, instance_name, iface_id): """ diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template index 3b34e54f4..e527cf35c 100644 --- a/nova/virt/interfaces.template +++ b/nova/virt/interfaces.template @@ -5,19 +5,20 @@ auto lo iface lo inet loopback -# The primary network interface -auto eth0 -iface eth0 inet static - address ${address} - netmask ${netmask} - broadcast ${broadcast} - gateway ${gateway} - dns-nameservers ${dns} +#for $ifc in $interfaces +auto ${ifc.name} +iface ${ifc.name} inet static + address ${ifc.address} + netmask ${ifc.netmask} + broadcast ${ifc.broadcast} + gateway ${ifc.gateway} + dns-nameservers ${ifc.dns} #if $use_ipv6 -iface eth0 inet6 static - address ${address_v6} - netmask ${netmask_v6} - gateway ${gateway_v6} +iface ${ifc.name} inet6 static + address ${ifc.address_v6} + netmask ${ifc.netmask_v6} + gateway ${ifc.gateway_v6} #end if +#end for diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index ef2d2cd6b..d74a9e85b 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -69,21 +69,24 @@ </disk> #end if #end if + +#for $nic in $nics <interface type='bridge'> - <source bridge='${bridge_name}'/> - <mac address='${mac_address}'/> + <source bridge='${nic.bridge_name}'/> + <mac address='${nic.mac_address}'/> <!-- <model type='virtio'/> CANT RUN virtio network right now --> - <filterref filter="nova-instance-${name}"> - <parameter name="IP" value="${ip_address}" /> - <parameter name="DHCPSERVER" value="${dhcp_server}" /> -#if $getVar('extra_params', False) - ${extra_params} + <filterref 
filter="nova-instance-${name}-${nic.id}"> + <parameter name="IP" value="${nic.ip_address}" /> + <parameter name="DHCPSERVER" value="${nic.dhcp_server}" /> +#if $getVar('nic.extra_params', False) + ${nic.extra_params} #end if -#if $getVar('gateway_v6', False) - <parameter name="RASERVER" value="${gateway_v6}" /> +#if $getVar('nic.gateway_v6', False) + <parameter name="RASERVER" value="${nic.gateway_v6}" /> #end if </filterref> </interface> +#end for <!-- The order is significant here. File must be defined first --> <serial type="file"> diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e1a0a6f29..2cecb010d 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -153,6 +153,51 @@ def _get_ip_version(cidr): return int(net.version()) +def _get_network_info(instance): + # TODO(adiantum) If we will keep this function + # we should cache network_info + admin_context = context.get_admin_context() + + ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, + instance['id']) + + networks = db.network_get_all_by_instance(admin_context, + instance['id']) + network_info = [] + + def ip_dict(ip): + return { + "ip": ip.address, + "netmask": network["netmask"], + "enabled": "1"} + + def ip6_dict(ip6): + prefix = ip6.network.cidr_v6 + mac = instance.mac_address + return { + "ip": utils.to_global_ipv6(prefix, mac), + "netmask": ip6.network.netmask_v6, + "gateway": ip6.network.gateway_v6, + "enabled": "1"} + + for network in networks: + network_ips = [ip for ip in ip_addresses + if ip.network_id == network.id] + + mapping = { + 'label': network['label'], + 'gateway': network['gateway'], + 'mac': instance.mac_address, + 'dns': [network['dns']], + 'ips': [ip_dict(ip) for ip in network_ips]} + + if FLAGS.use_ipv6: + mapping['ip6s'] = [ip6_dict(ip) for ip in network_ips] + + network_info.append((network, mapping)) + return network_info + + class LibvirtConnection(driver.ComputeDriver): def __init__(self, read_only): @@ -444,16 +489,18 @@ 
class LibvirtConnection(driver.ComputeDriver): def poll_rescued_instances(self, timeout): pass + # NOTE(ilyaalekseyev): Implementation like in multinics + # for xenapi(tr3buchet) @exception.wrap_exception - def spawn(self, instance): - xml = self.to_xml(instance) + def spawn(self, instance, network_info=None): + xml = self.to_xml(instance, network_info) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.NOSTATE, 'launching') - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) - self._create_image(instance, xml) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) + self._create_image(instance, xml, network_info) self._conn.createXML(xml, 0) LOG.debug(_("instance %s: is running"), instance['name']) self.firewall_driver.apply_instance_filter(instance) @@ -609,7 +656,14 @@ class LibvirtConnection(driver.ComputeDriver): utils.execute('truncate', target, '-s', "%dG" % local_gb) # TODO(vish): should we format disk by default? 
- def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): + def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, + network_info=None): + if not network_info: + network_info = _get_network_info(inst) + + if not suffix: + suffix = '' + # syntactic nicety def basepath(fname='', suffix=suffix): return os.path.join(FLAGS.instances_path, @@ -685,28 +739,35 @@ class LibvirtConnection(driver.ComputeDriver): key = str(inst['key_data']) net = None - network_ref = db.network_get_by_instance(context.get_admin_context(), - inst['id']) - if network_ref['injected']: - admin_context = context.get_admin_context() - address = db.instance_get_fixed_address(admin_context, inst['id']) + + nets = [] + ifc_template = open(FLAGS.injected_network_template).read() + ifc_num = -1 + admin_context = context.get_admin_context() + for (network_ref, mapping) in network_info: + ifc_num += 1 + + if not 'injected' in network_ref: + continue + + address = mapping['ips'][0]['ip'] address_v6 = None if FLAGS.use_ipv6: - address_v6 = db.instance_get_fixed_address_v6(admin_context, - inst['id']) - - interfaces_info = {'address': address, - 'netmask': network_ref['netmask'], - 'gateway': network_ref['gateway'], - 'broadcast': network_ref['broadcast'], - 'dns': network_ref['dns'], - 'address_v6': address_v6, - 'gateway_v6': network_ref['gateway_v6'], - 'netmask_v6': network_ref['netmask_v6'], - 'use_ipv6': FLAGS.use_ipv6} - - net = str(Template(self.interfaces_xml, - searchList=[interfaces_info])) + address_v6 = mapping['ip6s'][0]['ip'] + net_info = {'name': 'eth%d' % ifc_num, + 'address': address, + 'netmask': network_ref['netmask'], + 'gateway': network_ref['gateway'], + 'broadcast': network_ref['broadcast'], + 'dns': network_ref['dns'], + 'address_v6': address_v6, + 'gateway_v6': network_ref['gateway_v6'], + 'netmask_v6': network_ref['netmask_v6'], + 'use_ipv6': FLAGS.use_ipv6} + nets.append(net_info) + + net = str(Template(ifc_template, searchList=[{'interfaces': 
nets}])) + if key or net: inst_name = inst['name'] img_id = inst.image_id @@ -728,20 +789,11 @@ class LibvirtConnection(driver.ComputeDriver): if FLAGS.libvirt_type == 'uml': utils.execute('sudo', 'chown', 'root', basepath('disk')) - def to_xml(self, instance, rescue=False): - # TODO(termie): cache? - LOG.debug(_('instance %s: starting toXML method'), instance['name']) - network = db.network_get_by_instance(context.get_admin_context(), - instance['id']) - # FIXME(vish): stick this in db - instance_type = instance['instance_type'] - # instance_type = test.INSTANCE_TYPES[instance_type] - instance_type = instance_types.get_instance_type(instance_type) - ip_address = db.instance_get_fixed_address(context.get_admin_context(), - instance['id']) + def _get_nic_for_xml(self, network, mapping): # Assume that the gateway also acts as the dhcp server. dhcp_server = network['gateway'] gateway_v6 = network['gateway_v6'] + mac_id = mapping['mac'].replace(':', '') if FLAGS.allow_project_net_traffic: if FLAGS.use_ipv6: @@ -766,6 +818,38 @@ class LibvirtConnection(driver.ComputeDriver): (net, mask) else: extra_params = "\n" + + result = { + 'id': mac_id, + 'bridge_name': network['bridge'], + 'mac_address': mapping['mac'], + 'ip_address': mapping['ips'][0]['ip'], + 'dhcp_server': dhcp_server, + 'extra_params': extra_params, + } + + if gateway_v6: + result['gateway_v6'] = gateway_v6 + "/128" + + return result + + def to_xml(self, instance, rescue=False, network_info=None): + # TODO(termie): cache? 
+ LOG.debug(_('instance %s: starting toXML method'), instance['name']) + + # TODO(adiantum) remove network_info creation code + # when multinics will be completed + if not network_info: + network_info = _get_network_info(instance) + + nics = [] + for (network, mapping) in network_info: + nics.append(self._get_nic_for_xml(network, + mapping)) + # FIXME(vish): stick this in db + instance_type_name = instance['instance_type'] + instance_type = instance_types.get_instance_type(instance_type_name) + if FLAGS.use_cow_images: driver_type = 'qcow2' else: @@ -777,17 +861,11 @@ class LibvirtConnection(driver.ComputeDriver): instance['name']), 'memory_kb': instance_type['memory_mb'] * 1024, 'vcpus': instance_type['vcpus'], - 'bridge_name': network['bridge'], - 'mac_address': instance['mac_address'], - 'ip_address': ip_address, - 'dhcp_server': dhcp_server, - 'extra_params': extra_params, 'rescue': rescue, 'local': instance_type['local_gb'], - 'driver_type': driver_type} + 'driver_type': driver_type, + 'nics': nics} - if gateway_v6: - xml_info['gateway_v6'] = gateway_v6 + "/128" if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" @@ -800,7 +878,6 @@ class LibvirtConnection(driver.ComputeDriver): xml = str(Template(self.libvirt_xml, searchList=[xml_info])) LOG.debug(_('instance %s: finished toXML method'), instance['name']) - return xml def get_info(self, instance_name): @@ -1008,7 +1085,18 @@ class LibvirtConnection(driver.ComputeDriver): """ - return self._conn.getVersion() + # NOTE(justinsb): getVersion moved between libvirt versions + # Trying to do be compatible with older versions is a lost cause + # But ... 
we can at least give the user a nice message + method = getattr(self._conn, 'getVersion', None) + if method is None: + raise exception.Error(_("libvirt version is too old" + " (does not support getVersion)")) + # NOTE(justinsb): If we wanted to get the version, we could: + # method = getattr(libvirt, 'getVersion', None) + # NOTE(justinsb): This would then rely on a proper version check + + return method() def get_cpu_info(self): """Get cpuinfo information. @@ -1305,7 +1393,7 @@ class LibvirtConnection(driver.ComputeDriver): class FirewallDriver(object): - def prepare_instance_filter(self, instance): + def prepare_instance_filter(self, instance, network_info=None): """Prepare filters for the instance. At this point, the instance isn't running yet.""" @@ -1339,7 +1427,7 @@ class FirewallDriver(object): the security group.""" raise NotImplementedError() - def setup_basic_filtering(self, instance): + def setup_basic_filtering(self, instance, network_info=None): """Create rules to block spoofing and allow dhcp. This gets called when spawning an instance, before @@ -1348,11 +1436,6 @@ class FirewallDriver(object): """ raise NotImplementedError() - def _gateway_v6_for_instance(self, instance): - network = db.network_get_by_instance(context.get_admin_context(), - instance['id']) - return network['gateway_v6'] - class NWFilterFirewall(FirewallDriver): """ @@ -1444,10 +1527,13 @@ class NWFilterFirewall(FirewallDriver): </rule> </filter>''' - def setup_basic_filtering(self, instance): + def setup_basic_filtering(self, instance, network_info=None): """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" logging.info('called setup_basic_filtering in nwfilter') + if not network_info: + network_info = _get_network_info(instance) + if self.handle_security_groups: # No point in setting up a filter set that we'll be overriding # anyway. 
@@ -1456,9 +1542,11 @@ class NWFilterFirewall(FirewallDriver): logging.info('ensuring static filters') self._ensure_static_filters() - instance_filter_name = self._instance_filter_name(instance) - self._define_filter(self._filter_container(instance_filter_name, - ['nova-base'])) + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + self._define_filter(self._filter_container(instance_filter_name, + ['nova-base'])) def _ensure_static_filters(self): if self.static_filters_configured: @@ -1549,48 +1637,60 @@ class NWFilterFirewall(FirewallDriver): # Nothing to do pass - def prepare_instance_filter(self, instance): + def prepare_instance_filter(self, instance, network_info=None): """ Creates an NWFilter for the given instance. In the process, it makes sure the filters for the security groups as well as the base filter are all in place. """ + if not network_info: + network_info = _get_network_info(instance) if instance['image_id'] == FLAGS.vpn_image_id: base_filter = 'nova-vpn' else: base_filter = 'nova-base' - instance_filter_name = self._instance_filter_name(instance) - instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,) - instance_filter_children = [base_filter, instance_secgroup_filter_name] + ctxt = context.get_admin_context() + + instance_secgroup_filter_name = \ + '%s-secgroup' % (self._instance_filter_name(instance)) + #% (instance_filter_name,) + instance_secgroup_filter_children = ['nova-base-ipv4', 'nova-base-ipv6', 'nova-allow-dhcp-server'] - if FLAGS.use_ipv6: - gateway_v6 = self._gateway_v6_for_instance(instance) - if gateway_v6: - instance_secgroup_filter_children += ['nova-allow-ra-server'] - - ctxt = context.get_admin_context() - - if FLAGS.allow_project_net_traffic: - instance_filter_children += ['nova-project'] - if FLAGS.use_ipv6: - instance_filter_children += ['nova-project-v6'] - for security_group in 
db.security_group_get_by_instance(ctxt, - instance['id']): + for security_group in \ + db.security_group_get_by_instance(ctxt, instance['id']): self.refresh_security_group_rules(security_group['id']) instance_secgroup_filter_children += [('nova-secgroup-%s' % - security_group['id'])] + security_group['id'])] - self._define_filter( + self._define_filter( self._filter_container(instance_secgroup_filter_name, instance_secgroup_filter_children)) - self._define_filter( + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + instance_filter_children = \ + [base_filter, instance_secgroup_filter_name] + + if FLAGS.use_ipv6: + gateway_v6 = network['gateway_v6'] + + if gateway_v6: + instance_secgroup_filter_children += \ + ['nova-allow-ra-server'] + + if FLAGS.allow_project_net_traffic: + instance_filter_children += ['nova-project'] + if FLAGS.use_ipv6: + instance_filter_children += ['nova-project-v6'] + + self._define_filter( self._filter_container(instance_filter_name, instance_filter_children)) @@ -1638,8 +1738,10 @@ class NWFilterFirewall(FirewallDriver): xml += "chain='ipv4'>%s</filter>" % rule_xml return xml - def _instance_filter_name(self, instance): - return 'nova-instance-%s' % instance['name'] + def _instance_filter_name(self, instance, nic_id=None): + if not nic_id: + return 'nova-instance-%s' % (instance['name']) + return 'nova-instance-%s-%s' % (instance['name'], nic_id) class IptablesFirewallDriver(FirewallDriver): @@ -1654,9 +1756,11 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') - def setup_basic_filtering(self, instance): + def setup_basic_filtering(self, instance, network_info=None): """Use NWFilter from libvirt for this.""" - return self.nwfilter.setup_basic_filtering(instance) + if not network_info: + network_info = 
_get_network_info(instance) + return self.nwfilter.setup_basic_filtering(instance, network_info) def apply_instance_filter(self, instance): """No-op. Everything is done in prepare_instance_filter""" @@ -1670,29 +1774,40 @@ class IptablesFirewallDriver(FirewallDriver): LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) - def prepare_instance_filter(self, instance): + def prepare_instance_filter(self, instance, network_info=None): + if not network_info: + network_info = _get_network_info(instance) self.instances[instance['id']] = instance - self.add_filters_for_instance(instance) + self.add_filters_for_instance(instance, network_info) self.iptables.apply() - def add_filters_for_instance(self, instance): + def add_filters_for_instance(self, instance, network_info=None): + if not network_info: + network_info = _get_network_info(instance) chain_name = self._instance_chain_name(instance) self.iptables.ipv4['filter'].add_chain(chain_name) - ipv4_address = self._ip_for_instance(instance) - self.iptables.ipv4['filter'].add_rule('local', - '-d %s -j $%s' % - (ipv4_address, chain_name)) + + ips_v4 = [ip['ip'] for (_, mapping) in network_info + for ip in mapping['ips']] + + for ipv4_address in ips_v4: + self.iptables.ipv4['filter'].add_rule('local', + '-d %s -j $%s' % + (ipv4_address, chain_name)) if FLAGS.use_ipv6: self.iptables.ipv6['filter'].add_chain(chain_name) - ipv6_address = self._ip_for_instance_v6(instance) - self.iptables.ipv6['filter'].add_rule('local', - '-d %s -j $%s' % - (ipv6_address, - chain_name)) + ips_v6 = [ip['ip'] for (_, mapping) in network_info + for ip in mapping['ip6s']] + + for ipv6_address in ips_v6: + self.iptables.ipv6['filter'].add_rule('local', + '-d %s -j $%s' % + (ipv6_address, + chain_name)) - ipv4_rules, ipv6_rules = self.instance_rules(instance) + ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) for rule in ipv4_rules: self.iptables.ipv4['filter'].add_rule(chain_name, rule) @@ 
-1708,7 +1823,9 @@ class IptablesFirewallDriver(FirewallDriver): if FLAGS.use_ipv6: self.iptables.ipv6['filter'].remove_chain(chain_name) - def instance_rules(self, instance): + def instance_rules(self, instance, network_info=None): + if not network_info: + network_info = _get_network_info(instance) ctxt = context.get_admin_context() ipv4_rules = [] @@ -1722,28 +1839,36 @@ class IptablesFirewallDriver(FirewallDriver): ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] - dhcp_server = self._dhcp_server_for_instance(instance) - ipv4_rules += ['-s %s -p udp --sport 67 --dport 68 ' - '-j ACCEPT' % (dhcp_server,)] + dhcp_servers = [network['gateway'] for (network, _m) in network_info] + + for dhcp_server in dhcp_servers: + ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' + '-j ACCEPT' % (dhcp_server,)) #Allow project network traffic if FLAGS.allow_project_net_traffic: - cidr = self._project_cidr_for_instance(instance) - ipv4_rules += ['-s %s -j ACCEPT' % (cidr,)] + cidrs = [network['cidr'] for (network, _m) in network_info] + for cidr in cidrs: + ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) # We wrap these in FLAGS.use_ipv6 because they might cause # a DB lookup. The other ones are just list operations, so # they're not worth the clutter. 
if FLAGS.use_ipv6: # Allow RA responses - gateway_v6 = self._gateway_v6_for_instance(instance) - if gateway_v6: - ipv6_rules += ['-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)] + gateways_v6 = [network['gateway_v6'] for (network, _) in + network_info] + for gateway_v6 in gateways_v6: + ipv6_rules.append( + '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) #Allow project network traffic if FLAGS.allow_project_net_traffic: - cidrv6 = self._project_cidrv6_for_instance(instance) - ipv6_rules += ['-s %s -j ACCEPT' % (cidrv6,)] + cidrv6s = [network['cidr_v6'] for (network, _m) + in network_info] + + for cidrv6 in cidrv6s: + ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) security_groups = db.security_group_get_by_instance(ctxt, instance['id']) @@ -1825,31 +1950,3 @@ class IptablesFirewallDriver(FirewallDriver): def _instance_chain_name(self, instance): return 'inst-%s' % (instance['id'],) - - def _ip_for_instance(self, instance): - return db.instance_get_fixed_address(context.get_admin_context(), - instance['id']) - - def _ip_for_instance_v6(self, instance): - return db.instance_get_fixed_address_v6(context.get_admin_context(), - instance['id']) - - def _dhcp_server_for_instance(self, instance): - network = db.network_get_by_instance(context.get_admin_context(), - instance['id']) - return network['gateway'] - - def _gateway_v6_for_instance(self, instance): - network = db.network_get_by_instance(context.get_admin_context(), - instance['id']) - return network['gateway_v6'] - - def _project_cidr_for_instance(self, instance): - network = db.network_get_by_instance(context.get_admin_context(), - instance['id']) - return network['cidr'] - - def _project_cidrv6_for_instance(self, instance): - network = db.network_get_by_instance(context.get_admin_context(), - instance['id']) - return network['cidr_v6'] diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py new file mode 100644 index 000000000..d9b27de08 --- /dev/null +++ 
b/nova/virt/vmwareapi/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +:mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API. +""" diff --git a/nova/virt/vmwareapi/error_util.py b/nova/virt/vmwareapi/error_util.py new file mode 100644 index 000000000..53fa8f24d --- /dev/null +++ b/nova/virt/vmwareapi/error_util.py @@ -0,0 +1,96 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception classes and SOAP response error checking module.
+"""
+
+FAULT_NOT_AUTHENTICATED = "NotAuthenticated"
+FAULT_ALREADY_EXISTS = "AlreadyExists"
+
+
class VimException(Exception):
    """Base VIM exception.

    Wraps an underlying exception object together with a short summary
    string; the string form is the summary followed by the wrapped error.
    """

    def __init__(self, exception_summary, excep):
        super(VimException, self).__init__()
        self.exception_summary = exception_summary
        self.exception_obj = excep

    def __str__(self):
        return "%s%s" % (self.exception_summary, self.exception_obj)
+
+
class SessionOverLoadException(VimException):
    """Session Overload Exception.

    Marker subclass: adds no behavior beyond VimException, only lets
    callers distinguish this failure mode via the exception type.
    """
    pass


class VimAttributeError(VimException):
    """VI Attribute Error.

    Marker subclass of VimException (no additional state or behavior).
    """
    pass
+
+
class VimFaultException(Exception):
    """Exception carrying the SOAP fault names found in a response.

    ``fault_list`` holds the fault type names; ``exception_obj`` is the
    underlying error whose text becomes the string form.
    """

    def __init__(self, fault_list, excep):
        super(VimFaultException, self).__init__()
        self.fault_list = fault_list
        self.exception_obj = excep

    def __str__(self):
        return "%s" % (self.exception_obj,)
+
+
class FaultCheckers(object):
    """
    Methods for fault checking of SOAP response. Per Method error handlers
    for which we desire error checking are defined. SOAP faults are
    embedded in the SOAP messages as properties and not as SOAP faults.
    """

    @staticmethod
    def retrieveproperties_fault_checker(resp_obj):
        """
        Checks the RetrieveProperties response for errors. Certain faults
        are sent as part of the SOAP body as property of missingSet.
        For example NotAuthenticated fault.
        """
        if not resp_obj:
            # An empty RetrievePropertiesResponse is what the ESX SOAP
            # server sends for a timed-out idle session; it is as unusable
            # as a terminated session, so report NotAuthenticated.
            fault_list = ["NotAuthenticated"]
        else:
            # Collect the class name of every fault reported through the
            # missingSet property of the returned object contents, so the
            # names line up with regular SOAP fault checking.
            fault_list = [missing_elem.fault.fault.__class__.__name__
                          for obj_cont in resp_obj
                          if hasattr(obj_cont, "missingSet")
                          for missing_elem in obj_cont.missingSet]
        if fault_list:
            exc_msg_list = ', '.join(fault_list)
            raise VimFaultException(fault_list, Exception(_("Error(s) %s "
                        "occurred in the call to RetrieveProperties") %
                        exc_msg_list))
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py new file mode 100644 index 000000000..4bb467fa9 --- /dev/null +++ b/nova/virt/vmwareapi/fake.py @@ -0,0 +1,711 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A fake VMWare VI API implementation.
+"""
+
+from pprint import pformat
+import uuid
+
+from nova import exception
+from nova import log as logging
+from nova.virt.vmwareapi import vim
+from nova.virt.vmwareapi import error_util
+
# Names of the managed-object "tables" kept in the in-memory db below.
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
            'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
            'files']

# Constant size returned for fake image files by
# fake_get_vmdk_size_and_properties (units not specified here).
_FAKE_FILE_SIZE = 1024

# The in-memory "database": table name -> {obj_ref: managed object}
# ('files' is the exception: a plain list of datastore path strings).
_db_content = {}

LOG = logging.getLogger("nova.virt.vmwareapi.fake")
+
+
def log_db_contents(msg=None):
    """Log DB Contents."""
    # Pass the substitutions explicitly rather than via locals().
    LOG.debug(_("%(text)s: _db_content => %(content)s")
              % {'text': msg or "", 'content': pformat(_db_content)})
+
+
def reset():
    """Resets the db contents."""
    # The 'files' datastore is modelled as a flat list of path names;
    # every other table maps object refs to managed objects.
    for table in _CLASSES:
        _db_content[table] = [] if table == 'files' else {}
    create_network()
    create_host_network_system()
    create_host()
    create_datacenter()
    create_datastore()
    create_res_pool()
+
+
def cleanup():
    """Clear the db contents."""
    # NOTE(review): unlike reset(), this sets every table -- including
    # 'files' -- to an empty dict, and does not recreate default objects.
    for c in _CLASSES:
        _db_content[c] = {}
+
+
def _create_object(table, table_obj):
    """Create an object in the db."""
    # Keyed by the object's reference string so lookups mirror the real API.
    _db_content[table][table_obj.obj] = table_obj
+
+
def _get_objects(obj_type):
    """Return a list of all objects of the given type in the db."""
    # dict.values() yields exactly what the previous key-loop collected;
    # copy into a fresh list so callers can mutate it freely.
    return list(_db_content[obj_type].values())
+
+
class Prop(object):
    """A single (name, value) property entry for a managed object."""

    def __init__(self):
        self.name = None
        self.val = None


class ManagedObject(object):
    """Managed Data Object base class."""

    def __init__(self, name="ManagedObject", obj_ref=None):
        """Sets the obj property which acts as a reference to the object."""
        # Bypass our own __setattr__ for the bookkeeping attributes so they
        # live on the instance itself rather than inside propSet.
        super(ManagedObject, self).__setattr__('objName', name)
        if obj_ref is None:
            obj_ref = str(uuid.uuid4())
        object.__setattr__(self, 'obj', obj_ref)
        object.__setattr__(self, 'propSet', [])

    def set(self, attr, val):
        """
        Sets an attribute value. Supports dotted names like 'a.b.c',
        which plain attribute assignment syntax cannot express.
        """
        self.__setattr__(attr, val)

    def get(self, attr):
        """
        Gets an attribute. Used as an intermediary to get nested
        property like 'a.b.c' value.
        """
        return self.__getattr__(attr)

    def __setattr__(self, attr, val):
        # Update an existing property in place, otherwise append a new one.
        existing = next((p for p in self.propSet if p.name == attr), None)
        if existing is not None:
            existing.val = val
            return
        entry = Prop()
        entry.name = attr
        entry.val = val
        self.propSet.append(entry)

    def __getattr__(self, attr):
        for entry in self.propSet:
            if entry.name == attr:
                return entry.val
        raise exception.Error(_("Property %(attr)s not set for the managed "
                                "object %(objName)s") %
                              {'attr': attr,
                               'objName': self.objName})
+
+
class DataObject(object):
    """Data object base class.

    An empty attribute bag: fake suds data objects are built by assigning
    arbitrary attributes onto instances.
    """
    pass


class VirtualDisk(DataObject):
    """
    Virtual Disk class. Does nothing special except setting
    __class__.__name__ to 'VirtualDisk'. Refer place where __class__.__name__
    is used in the code.
    """
    pass


class VirtualDiskFlatVer2BackingInfo(DataObject):
    """VirtualDiskFlatVer2BackingInfo class.

    Marker type for a flat (ver2) virtual disk backing; attributes are
    assigned by callers (e.g. fileName, key).
    """
    pass


class VirtualLsiLogicController(DataObject):
    """VirtualLsiLogicController class.

    Marker type for an LSI Logic SCSI controller; attributes are assigned
    by callers (e.g. key).
    """
    pass
+
+
class VirtualMachine(ManagedObject):
    """Virtual Machine class.

    Recognized kwargs: name, conn_state, guest, ds (required -- its .obj
    is dereferenced), toolsstatus, toolsrunningstate, powerstate,
    vmPathName, numCpu, mem, virtual_disk, extra_config.
    """

    def __init__(self, **kwargs):
        super(VirtualMachine, self).__init__("VirtualMachine")
        self.set("name", kwargs.get("name"))
        self.set("runtime.connectionState",
                 kwargs.get("conn_state", "connected"))
        self.set("summary.config.guestId", kwargs.get("guest", "otherGuest"))
        ds_do = DataObject()
        ds_do.ManagedObjectReference = [kwargs.get("ds").obj]
        self.set("datastore", ds_do)
        self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
                                                         "toolsOk"))
        self.set("summary.guest.toolsRunningStatus", kwargs.get(
                 "toolsrunningstate", "guestToolsRunning"))
        self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
        self.set("config.files.vmPathName", kwargs.get("vmPathName"))
        self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
        self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
        self.set("config.hardware.device", kwargs.get("virtual_disk", None))
        self.set("config.extraConfig", kwargs.get("extra_config", None))

    def reconfig(self, factory, val):
        """
        Called to reconfigure the VM. Actually customizes the property
        setting of the Virtual Machine object.
        """
        # NOTE(review): 'factory' is accepted for interface parity with the
        # real API but is not used in this fake implementation.
        try:
            # Case of Reconfig of VM to attach disk
            controller_key = val.deviceChange[1].device.controllerKey
            filename = val.deviceChange[1].device.backing.fileName

            disk = VirtualDisk()
            disk.controllerKey = controller_key

            disk_backing = VirtualDiskFlatVer2BackingInfo()
            disk_backing.fileName = filename
            disk_backing.key = -101
            disk.backing = disk_backing

            controller = VirtualLsiLogicController()
            controller.key = controller_key

            self.set("config.hardware.device", [disk, controller])
        except AttributeError:
            # Case of Reconfig of VM to set extra params
            self.set("config.extraConfig", val.extraConfig)
+
+
class Network(ManagedObject):
    """Network class."""

    def __init__(self):
        super(Network, self).__init__("Network")
        self.set("summary.name", "vmnet0")


class ResourcePool(ManagedObject):
    """Resource Pool class."""

    def __init__(self):
        super(ResourcePool, self).__init__("ResourcePool")
        self.set("name", "ResPool")


class Datastore(ManagedObject):
    """Datastore class."""

    def __init__(self):
        super(Datastore, self).__init__("Datastore")
        self.set("summary.type", "VMFS")
        self.set("summary.name", "fake-ds")


class HostNetworkSystem(ManagedObject):
    """HostNetworkSystem class."""

    def __init__(self):
        super(HostNetworkSystem, self).__init__("HostNetworkSystem")
        self.set("name", "networkSystem")

        # A single fake physical nic, 'vmnic0'.
        pnic_do = DataObject()
        pnic_do.device = "vmnic0"

        net_info_pnic = DataObject()
        net_info_pnic.PhysicalNic = [pnic_do]

        self.set("networkInfo.pnic", net_info_pnic)
+
+
class HostSystem(ManagedObject):
    """Host System class."""

    def __init__(self):
        super(HostSystem, self).__init__("HostSystem")
        self.set("name", "ha-host")

        # Lazily create the singleton helpers this host references.
        if _db_content.get("HostNetworkSystem", None) is None:
            create_host_network_system()
        net_sys_key = _db_content["HostNetworkSystem"].keys()[0]
        self.set("configManager.networkSystem",
                 _db_content["HostNetworkSystem"][net_sys_key].obj)

        if _db_content.get("Network", None) is None:
            create_network()
        net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)

        # Default virtual switch wired to the single physical nic.
        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
        vswitch_do.name = "vSwitch0"
        vswitch_do.portgroup = ["PortGroup-vmnet0"]

        net_switch = DataObject()
        net_switch.HostVirtualSwitch = [vswitch_do]
        self.set("config.network.vswitch", net_switch)

        # Default port group on that switch.
        pg_spec = DataObject()
        pg_spec.vlanId = 0
        pg_spec.name = "vmnet0"

        host_pg_do = DataObject()
        host_pg_do.key = "PortGroup-vmnet0"
        host_pg_do.spec = pg_spec

        host_pg = DataObject()
        host_pg.HostPortGroup = [host_pg_do]
        self.set("config.network.portgroup", host_pg)

    def _add_port_group(self, spec):
        """Adds a port group to the host system object in the db."""
        # New switch entry carrying the port group.
        new_vswitch = DataObject()
        new_vswitch.pnic = ["vmnic0"]
        new_vswitch.name = spec.vswitchName
        new_vswitch.portgroup = ["PortGroup-%s" % spec.name]
        self.get("config.network.vswitch").HostVirtualSwitch.append(
            new_vswitch)

        # Matching port group entry.
        new_pg_spec = DataObject()
        new_pg_spec.vlanId = spec.vlanId
        new_pg_spec.name = spec.name

        new_pg = DataObject()
        new_pg.key = "PortGroup-%s" % spec.name
        new_pg.spec = new_pg_spec
        self.get("config.network.portgroup").HostPortGroup.append(new_pg)
+
+
class Datacenter(ManagedObject):
    """Datacenter class."""

    def __init__(self):
        super(Datacenter, self).__init__("Datacenter")
        self.set("name", "ha-datacenter")
        self.set("vmFolder", "vm_folder_ref")
        # Ensure at least one fake network exists and reference it.
        if _db_content.get("Network", None) is None:
            create_network()
        net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)
+
+
class Task(ManagedObject):
    """Task class.

    Models a VI API task; the 'info' property carries the task name and
    its state ("running", "success" or "error" as used by callers of
    create_task).
    """

    def __init__(self, task_name, state="running"):
        super(Task, self).__init__("Task")
        # Instantiate DataObject: assigning the bare class (as the previous
        # code did with `info = DataObject`) set name/state as *class*
        # attributes, shared and clobbered across every Task and every
        # DataObject instance.
        info = DataObject()
        info.name = task_name
        info.state = state
        self.set("info", info)
+
+
# Module-level factory helpers: each builds one fake managed object and
# registers it in the matching _db_content table.


def create_host_network_system():
    host_net_system = HostNetworkSystem()
    _create_object("HostNetworkSystem", host_net_system)


def create_host():
    host_system = HostSystem()
    _create_object('HostSystem', host_system)


def create_datacenter():
    data_center = Datacenter()
    _create_object('Datacenter', data_center)


def create_datastore():
    data_store = Datastore()
    _create_object('Datastore', data_store)


def create_res_pool():
    res_pool = ResourcePool()
    _create_object('ResourcePool', res_pool)


def create_network():
    network = Network()
    _create_object('Network', network)


def create_task(task_name, state="running"):
    # Returned so callers can hand task.obj back as a fake task reference.
    task = Task(task_name, state)
    _create_object("Task", task)
    return task


def _add_file(file_path):
    """Adds a file reference to the db."""
    _db_content["files"].append(file_path)
+
+
def _remove_file(file_path):
    """Removes a file reference from the db.

    A path containing '.vmdk' is treated as a single file (exact match
    required); anything else is treated as a folder, removing every entry
    that contains the path.
    """
    if _db_content.get("files") is None:
        raise exception.NotFound(_("No files have been added yet"))
    # Check if the remove is for a single file object or for a folder
    if file_path.find(".vmdk") != -1:
        if file_path not in _db_content.get("files"):
            raise exception.NotFound(_("File- '%s' is not there in the "
                                       "datastore") % file_path)
        _db_content.get("files").remove(file_path)
    else:
        # Build the surviving list instead of removing from the list while
        # iterating it (the previous code mutated the list it was looping
        # over, which skips entries after each removal).
        _db_content["files"] = [entry for entry in _db_content["files"]
                                if entry.find(file_path) == -1]
+
+
def fake_fetch_image(image, instance, **kwargs):
    """Fakes fetch image call. Just adds a reference to the db for the file."""
    datastore = kwargs.get("datastore_name")
    path = kwargs.get("file_path")
    # Datastore paths take the form "[<datastore>] <path>".
    _add_file("[" + datastore + "] " + path)
+
+
def fake_upload_image(image, instance, **kwargs):
    """Fakes the upload of an image. No state is recorded."""
    pass


def fake_get_vmdk_size_and_properties(image_id, instance):
    """Fakes the file size and properties fetch for the image file."""
    # Fixed size and guest/adapter properties regardless of image_id.
    props = {"vmware_ostype": "otherGuest",
             "vmware_adaptertype": "lsiLogic"}
    return _FAKE_FILE_SIZE, props
+
+
def _get_vm_mdo(vm_ref):
    """Gets the Virtual Machine with the ref from the db."""
    vms = _db_content.get("VirtualMachine", None)
    if vms is None:
        raise exception.NotFound(_("There is no VM registered"))
    if vm_ref not in vms:
        raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                   "there") % vm_ref)
    return vms[vm_ref]
+
+
class FakeFactory(object):
    """Fake factory class for the suds client."""

    def create(self, obj_name):
        """Creates a namespace object.

        The requested name is ignored; an empty DataObject attribute bag
        is returned for any type.
        """
        return DataObject()
+
+
+class FakeVim(object):
+ """Fake VIM Class."""
+
    def __init__(self, protocol="https", host="localhost", trace=None):
        """
        Initializes the suds client object, sets the service content
        contents and the cookies for the session.
        """
        # NOTE(review): protocol/host/trace are accepted for signature
        # parity with the real VIM client but are unused by this fake.
        self._session = None
        self.client = DataObject()
        self.client.factory = FakeFactory()

        transport = DataObject()
        transport.cookiejar = "Fake-CookieJar"
        options = DataObject()
        options.transport = transport

        self.client.options = options

        # Well-known manager reference strings handed out by the fake.
        service_content = self.client.factory.create('ns0:ServiceContent')
        service_content.propertyCollector = "PropCollector"
        service_content.virtualDiskManager = "VirtualDiskManager"
        service_content.fileManager = "FileManager"
        service_content.rootFolder = "RootFolder"
        service_content.sessionManager = "SessionManager"
        self._service_content = service_content
+
    def get_service_content(self):
        # Returns the ServiceContent data object built in __init__.
        return self._service_content

    def __repr__(self):
        return "Fake VIM Object"

    def __str__(self):
        return "Fake VIM Object"
+
    def _login(self):
        """Logs in and sets the session object in the db."""
        # A fresh uuid string acts as the session key for each login.
        self._session = str(uuid.uuid4())
        session = DataObject()
        session.key = self._session
        _db_content['session'][self._session] = session
        return session

    def _logout(self):
        """Logs out and remove the session object ref from the db."""
        s = self._session
        # Clear the active session before validating, so even a bad logout
        # leaves the fake client logged out.
        self._session = None
        if s not in _db_content['session']:
            raise exception.Error(
                _("Logging out a session that is invalid or already logged "
                  "out: %s") % s)
        del _db_content['session'][s]
+
+ def _terminate_session(self, *args, **kwargs):
+ """Terminates a session."""
+ s = kwargs.get("sessionId")[0]
+ if s not in _db_content['session']:
+ return
+ del _db_content['session'][s]
+
    def _check_session(self):
        """Checks if the session is active."""
        if (self._session is None or self._session not in
                _db_content['session']):
            LOG.debug(_("Session is faulty"))
            raise error_util.VimFaultException(
                [error_util.FAULT_NOT_AUTHENTICATED],
                _("Session Invalid"))

    def _create_vm(self, method, *args, **kwargs):
        """Creates and registers a VM object with the Host System."""
        config_spec = kwargs.get("config")
        # The single fake datastore backs every created VM.
        ds = _db_content["Datastore"][_db_content["Datastore"].keys()[0]]
        vm_dict = {"name": config_spec.name,
                   "ds": ds,
                   "powerstate": "poweredOff",
                   "vmPathName": config_spec.files.vmPathName,
                   "numCpu": config_spec.numCPUs,
                   "mem": config_spec.memoryMB}
        virtual_machine = VirtualMachine(**vm_dict)
        _create_object("VirtualMachine", virtual_machine)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _reconfig_vm(self, method, *args, **kwargs):
        """Reconfigures a VM and sets the properties supplied."""
        vm_ref = args[0]
        # Raises NotFound if the ref is unknown.
        vm_mdo = _get_vm_mdo(vm_ref)
        vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj
+
+ def _create_copy_disk(self, method, vmdk_file_path):
+ """Creates/copies a vmdk file object in the datastore."""
+ # We need to add/create both .vmdk and .-flat.vmdk files
+ flat_vmdk_file_path = \
+ vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+ _add_file(vmdk_file_path)
+ _add_file(flat_vmdk_file_path)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
    def _snapshot_vm(self, method):
        """Snapshots a VM. Here we do nothing for faking sake."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _delete_disk(self, method, *args, **kwargs):
        """Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
        vmdk_file_path = kwargs.get("name")
        flat_vmdk_file_path = \
            vmdk_file_path.replace(".vmdk", "-flat.vmdk")
        _remove_file(vmdk_file_path)
        _remove_file(flat_vmdk_file_path)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _delete_file(self, method, *args, **kwargs):
        """Deletes a file from the datastore."""
        _remove_file(kwargs.get("name"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _just_return(self):
        """Fakes a return."""
        return

    def _unregister_vm(self, method, *args, **kwargs):
        """Unregisters a VM from the Host System."""
        vm_ref = args[0]
        # _get_vm_mdo raises NotFound if the ref is unknown.
        _get_vm_mdo(vm_ref)
        # NOTE(review): unlike the *_Task fakes above, no task object is
        # created or returned here.
        del _db_content["VirtualMachine"][vm_ref]
+
+ def _search_ds(self, method, *args, **kwargs):
+ """Searches the datastore for a file."""
+ ds_path = kwargs.get("datastorePath")
+ if _db_content.get("files", None) is None:
+ raise exception.NotFound(_("No files have been added yet"))
+ for file in _db_content.get("files"):
+ if file.find(ds_path) != -1:
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+ task_mdo = create_task(method, "error")
+ return task_mdo.obj
+
    def _make_dir(self, method, *args, **kwargs):
        """Creates a directory in the datastore."""
        ds_path = kwargs.get("name")
        if _db_content.get("files", None) is None:
            raise exception.NotFound(_("No files have been added yet"))
        # Directories are tracked in the same flat list as files.
        _db_content["files"].append(ds_path)

    def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
        """Sets power state for the VM."""
        if _db_content.get("VirtualMachine", None) is None:
            raise exception.NotFound(_(" No Virtual Machine has been "
                                       "registered yet"))
        if vm_ref not in _db_content.get("VirtualMachine"):
            raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                       "there") % vm_ref)
        vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
        vm_mdo.set("runtime.powerState", pwr_state)
        task_mdo = create_task(method, "success")
        return task_mdo.obj
+
    def _retrieve_properties(self, method, *args, **kwargs):
        """Retrieves properties based on the type."""
        spec_set = kwargs.get("specSet")[0]
        # NOTE(review): 'type' shadows the builtin; kept as-is here.
        type = spec_set.propSet[0].type
        properties = spec_set.propSet[0].pathSet
        objs = spec_set.objectSet
        lst_ret_objs = []
        for obj in objs:
            try:
                obj_ref = obj.obj
                # This means that we are doing a search for the managed
                # dataobjects of the type in the inventory
                if obj_ref == "RootFolder":
                    for mdo_ref in _db_content[type]:
                        mdo = _db_content[type][mdo_ref]
                        # Create a temp Managed object which has the same ref
                        # as the parent object and copies just the properties
                        # asked for. We need .obj along with the propSet of
                        # just the properties asked for
                        temp_mdo = ManagedObject(mdo.objName, mdo.obj)
                        for prop in properties:
                            temp_mdo.set(prop, mdo.get(prop))
                        lst_ret_objs.append(temp_mdo)
                else:
                    if obj_ref in _db_content[type]:
                        mdo = _db_content[type][obj_ref]
                        temp_mdo = ManagedObject(mdo.objName, obj_ref)
                        for prop in properties:
                            temp_mdo.set(prop, mdo.get(prop))
                        lst_ret_objs.append(temp_mdo)
            except Exception, exc:
                # Skip objects missing a requested property rather than
                # failing the whole retrieval.
                LOG.exception(exc)
                continue
        return lst_ret_objs
+
+ def _add_port_group(self, method, *args, **kwargs):
+ """Adds a port group to the host system."""
+ host_mdo = \
+ _db_content["HostSystem"][_db_content["HostSystem"].keys()[0]]
+ host_mdo._add_port_group(kwargs.get("portgrp"))
+
    def __getattr__(self, attr_name):
        """Dispatch the fake implementation matching a VIM API call name.

        Mirrors the real Vim object: attribute access yields a callable
        that performs the (fake) SOAP operation.  Every call except Login
        first validates the fake session via _check_session().
        """
        if attr_name != "Login":
            self._check_session()
        if attr_name == "Login":
            return lambda *args, **kwargs: self._login()
        elif attr_name == "Logout":
            # NOTE(review): unlike every other branch this performs the
            # logout at attribute-access time and returns None instead of
            # a callable -- confirm no caller invokes vim.Logout(...).
            self._logout()
        elif attr_name == "TerminateSession":
            return lambda *args, **kwargs: self._terminate_session(
                                           *args, **kwargs)
        elif attr_name == "CreateVM_Task":
            return lambda *args, **kwargs: self._create_vm(attr_name,
                                                *args, **kwargs)
        elif attr_name == "ReconfigVM_Task":
            return lambda *args, **kwargs: self._reconfig_vm(attr_name,
                                                *args, **kwargs)
        elif attr_name == "CreateVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(attr_name,
                                                kwargs.get("name"))
        elif attr_name == "DeleteDatastoreFile_Task":
            return lambda *args, **kwargs: self._delete_file(attr_name,
                                                *args, **kwargs)
        elif attr_name == "PowerOnVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "poweredOn")
        elif attr_name == "PowerOffVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "poweredOff")
        elif attr_name == "RebootGuest":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "ResetVM_Task":
            # A reset leaves the VM powered on.
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "poweredOn")
        elif attr_name == "SuspendVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                args[0], "suspended")
        elif attr_name == "CreateSnapshot_Task":
            return lambda *args, **kwargs: self._snapshot_vm(attr_name)
        elif attr_name == "CopyVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(attr_name,
                                                kwargs.get("destName"))
        elif attr_name == "DeleteVirtualDisk_Task":
            return lambda *args, **kwargs: self._delete_disk(attr_name,
                                                *args, **kwargs)
        elif attr_name == "UnregisterVM":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                *args, **kwargs)
        elif attr_name == "SearchDatastore_Task":
            return lambda *args, **kwargs: self._search_ds(attr_name,
                                                *args, **kwargs)
        elif attr_name == "MakeDirectory":
            return lambda *args, **kwargs: self._make_dir(attr_name,
                                                *args, **kwargs)
        elif attr_name == "RetrieveProperties":
            return lambda *args, **kwargs: self._retrieve_properties(
                                                attr_name, *args, **kwargs)
        elif attr_name == "AcquireCloneTicket":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "AddPortGroup":
            return lambda *args, **kwargs: self._add_port_group(attr_name,
                                                *args, **kwargs)
diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py new file mode 100644 index 000000000..2ec773b7b --- /dev/null +++ b/nova/virt/vmwareapi/io_util.py @@ -0,0 +1,168 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility classes for defining the time saving transfer of data from the reader
+to the write using a LightQueue as a Pipe between the reader and the writer.
+"""
+
+from eventlet import event
+from eventlet import greenthread
+from eventlet.queue import LightQueue
+
+from glance import client
+
+from nova import exception
+from nova import log as logging
+
+LOG = logging.getLogger("nova.virt.vmwareapi.io_util")
+
+IO_THREAD_SLEEP_TIME = .01
+GLANCE_POLL_INTERVAL = 5
+
+
class ThreadSafePipe(LightQueue):
    """Eventlet-queue-backed pipe between an image reader and a writer.

    The reader greenthread write()s chunks in; the writer greenthread
    read()s them out until ``transfer_size`` bytes have passed through.
    """

    def __init__(self, maxsize, transfer_size):
        LightQueue.__init__(self, maxsize)
        self.transfer_size = transfer_size
        self.transferred = 0

    def read(self, chunk_size):
        """Pop the next chunk; return "" once the full transfer is done.

        ``chunk_size`` is ignored: chunks come out exactly as they were
        written in, which is the size the writer asks for.
        """
        if self.transferred >= self.transfer_size:
            return ""
        chunk = self.get()
        self.transferred += len(chunk)
        return chunk

    def write(self, data):
        """Push a chunk into the pipe."""
        self.put(data)

    def close(self):
        """No-op; present so the pipe can be used as a file-like object."""
        pass
+
+
class GlanceWriteThread(object):
    """Pushes image data to glance and polls until the image is in the
    correct ('active') state.

    The upload itself happens through ``update_image`` with ``self.input``
    as the body; afterwards the image status is polled every
    GLANCE_POLL_INTERVAL seconds until it settles.
    """

    def __init__(self, input, glance_client, image_id, image_meta=None):
        self.input = input
        self.glance_client = glance_client
        self.image_id = image_id
        # Default changed from a shared mutable ``{}`` (classic
        # mutable-default-argument pitfall) to None; callers see no change.
        self.image_meta = image_meta if image_meta is not None else {}
        self._running = False

    def start(self):
        """Kick off the upload in a greenthread; return its Event."""
        self.done = event.Event()

        def _inner():
            """Function to do the image data transfer through an update
            and thereon checks if the state is 'active'."""
            self.glance_client.update_image(self.image_id,
                                            image_meta=self.image_meta,
                                            image_data=self.input)
            self._running = True
            while self._running:
                try:
                    image_status = \
                        self.glance_client.get_image_meta(self.image_id).get(
                            "status")
                    if image_status == "active":
                        self.stop()
                        self.done.send(True)
                    # If the state is killed, then raise an exception.
                    elif image_status == "killed":
                        self.stop()
                        exc_msg = _("Glance image %s is in killed state") %\
                            self.image_id
                        LOG.exception(exc_msg)
                        self.done.send_exception(exception.Error(exc_msg))
                    elif image_status in ["saving", "queued"]:
                        # Still uploading/processing; poll again shortly.
                        greenthread.sleep(GLANCE_POLL_INTERVAL)
                    else:
                        self.stop()
                        exc_msg = _("Glance image "
                                    "%(image_id)s is in unknown state "
                                    "- %(state)s") % {
                                        "image_id": self.image_id,
                                        "state": image_status}
                        LOG.exception(exc_msg)
                        self.done.send_exception(exception.Error(exc_msg))
                except Exception as exc:
                    self.stop()
                    self.done.send_exception(exc)

        greenthread.spawn(_inner)
        return self.done

    def stop(self):
        """Stop the polling loop."""
        self._running = False

    def wait(self):
        """Block until the upload finished; re-raises its exception."""
        return self.done.wait()

    def close(self):
        """No-op; present for file-like interface consistency."""
        pass
+
+
class IOThread(object):
    """Copies chunks from ``input`` to ``output`` until the input runs dry."""

    def __init__(self, input, output):
        self.input = input
        self.output = output
        self._running = False
        # Set to True when the copy loop dies with an exception
        # (previously declared but never assigned anywhere).
        self.got_exception = False

    def start(self):
        """Kick off the copy loop in a greenthread; return its Event."""
        self.done = event.Event()

        def _inner():
            """Read data from the input and write the same to the output
            until the transfer completes (an empty chunk marks EOF)."""
            self._running = True
            while self._running:
                try:
                    data = self.input.read(None)
                    if not data:
                        # Transfer complete.  Do not forward the empty
                        # sentinel chunk to the output (the old code wrote
                        # it through, which was at best a pointless no-op).
                        self.stop()
                        self.done.send(True)
                        break
                    self.output.write(data)
                    # Yield to other greenthreads between chunks.
                    greenthread.sleep(IO_THREAD_SLEEP_TIME)
                except Exception as exc:
                    self.stop()
                    self.got_exception = True
                    LOG.exception(exc)
                    self.done.send_exception(exc)

        greenthread.spawn(_inner)
        return self.done

    def stop(self):
        """Stop the copy loop."""
        self._running = False

    def wait(self):
        """Block until the copy finished; re-raises its exception."""
        return self.done.wait()
diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_utils.py new file mode 100644 index 000000000..e77842535 --- /dev/null +++ b/nova/virt/vmwareapi/network_utils.py @@ -0,0 +1,149 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility functions for ESX Networking.
+"""
+
+from nova import exception
+from nova import log as logging
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+
+LOG = logging.getLogger("nova.virt.vmwareapi.network_utils")
+
+
def get_network_with_the_name(session, network_name="vmnet0"):
    """Look up a network by name.

    Returns the managed object reference of the first network whose
    ``summary.name`` equals ``network_name``, or None when no such network
    exists (or the host has no networks at all).
    """
    host_systems = session._call_method(vim_util, "get_objects",
                                        "HostSystem", ["network"])
    networks_prop = host_systems[0].propSet[0].val
    # When the host has no networks suds answers with "" in the parent
    # property field instead of an (empty) ManagedObjectReference list.
    if not networks_prop:
        return None
    network_refs = networks_prop.ManagedObjectReference
    network_objs = session._call_method(vim_util,
                "get_properties_for_a_collection_of_objects",
                "Network", network_refs, ["summary.name"])
    for network_obj in network_objs:
        if network_obj.propSet[0].val == network_name:
            return network_obj.obj
    return None
+
+
def get_vswitch_for_vlan_interface(session, vlan_interface):
    """
    Gets the vswitch associated with the physical network adapter
    with the name supplied.

    Returns the vswitch name, or None when no vswitch references the
    adapter (the function falls off the end of the loop).
    """
    # Get the list of vSwitches on the Host System
    host_mor = session._call_method(vim_util, "get_objects",
                "HostSystem")[0].obj
    vswitches_ret = session._call_method(vim_util,
                "get_dynamic_property", host_mor,
                "HostSystem", "config.network.vswitch")
    # Meaning there are no vSwitches on the host. Shouldn't be the case,
    # but just doing code check
    if not vswitches_ret:
        return
    vswitches = vswitches_ret.HostVirtualSwitch
    # Get the vSwitch associated with the network adapter
    for elem in vswitches:
        try:
            for nic_elem in elem.pnic:
                # Match on the trailing '-'-separated token of the pnic
                # key -- presumably of the form "...-vmnic0"; confirm
                # against the VI SDK's HostVirtualSwitch.pnic format.
                if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
                    return elem.name
        # Catching Attribute error as a vSwitch may not be associated with a
        # physical NIC.
        except AttributeError:
            pass
+
+
def check_if_vlan_interface_exists(session, vlan_interface):
    """Return True when a physical NIC named ``vlan_interface`` exists
    on the ESX host, False otherwise."""
    host_net_system_mor = session._call_method(vim_util, "get_objects",
        "HostSystem", ["configManager.networkSystem"])[0].propSet[0].val
    physical_nics_ret = session._call_method(vim_util,
                "get_dynamic_property", host_net_system_mor,
                "HostNetworkSystem", "networkInfo.pnic")
    # The host has no physical NICs at all.
    if not physical_nics_ret:
        return False
    return any(vlan_interface == pnic.device
               for pnic in physical_nics_ret.PhysicalNic)
+
+
def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
    """Return (vlan id, vswitch name) for the named port group.

    Returns None (implicitly) when the port group does not exist on the
    host.  Raises ``exception.Error`` when the SOAP server reports no port
    groups at all, which should not happen on a healthy host.
    """
    host_mor = session._call_method(vim_util, "get_objects",
                "HostSystem")[0].obj
    port_grps_on_host_ret = session._call_method(vim_util,
                "get_dynamic_property", host_mor,
                "HostSystem", "config.network.portgroup")
    if not port_grps_on_host_ret:
        # NOTE(fix): _() must wrap the string literal (not a variable) so
        # the message is picked up for translation.
        excep = _("ESX SOAP server returned an empty port group "
                  "for the host system in its response")
        LOG.exception(excep)
        raise exception.Error(excep)
    port_grps_on_host = port_grps_on_host_ret.HostPortGroup
    for p_gp in port_grps_on_host:
        if p_gp.spec.name == pg_name:
            # The vswitch display name is taken as the last '-'-separated
            # token of the port group's vswitch key.
            p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
            return p_gp.spec.vlanId, p_grp_vswitch_name
+
+
def create_port_group(session, pg_name, vswitch_name, vlan_id=0):
    """
    Creates a port group on the host system with the vlan tags
    supplied. VLAN id 0 means no vlan id association.

    Succeeds silently (returns None) even when the port group already
    exists -- see the race-condition note below.
    """
    client_factory = session._get_vim().client.factory
    add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
                    client_factory,
                    vswitch_name,
                    pg_name,
                    vlan_id)
    host_mor = session._call_method(vim_util, "get_objects",
                "HostSystem")[0].obj
    network_system_mor = session._call_method(vim_util,
                "get_dynamic_property", host_mor,
                "HostSystem", "configManager.networkSystem")
    LOG.debug(_("Creating Port Group with name %s on "
                "the ESX host") % pg_name)
    try:
        session._call_method(session._get_vim(),
                "AddPortGroup", network_system_mor,
                portgrp=add_prt_grp_spec)
    except error_util.VimFaultException, exc:
        # There can be a race condition when two instances try
        # adding port groups at the same time. One succeeds, then
        # the other one will get an exception. Since we are
        # concerned with the port group being created, which is done
        # by the other call, we can ignore the exception.
        if error_util.FAULT_ALREADY_EXISTS not in exc.fault_list:
            raise exception.Error(exc)
    LOG.debug(_("Created Port Group with name %s on "
                "the ESX host") % pg_name)
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py new file mode 100644 index 000000000..84f4942eb --- /dev/null +++ b/nova/virt/vmwareapi/read_write_util.py @@ -0,0 +1,182 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Classes to handle image files
+
+Collection of classes to handle image upload/download to/from Image service
+(like Glance image storage and retrieval service) from/to ESX/ESXi server.
+
+"""
+
+import httplib
+import urllib
+import urllib2
+import urlparse
+
+from eventlet import event
+from eventlet import greenthread
+
+from glance import client
+
+from nova import flags
+from nova import log as logging
+
+LOG = logging.getLogger("nova.virt.vmwareapi.read_write_util")
+
+FLAGS = flags.FLAGS
+
+USER_AGENT = "OpenStack-ESX-Adapter"
+
+try:
+ READ_CHUNKSIZE = client.BaseClient.CHUNKSIZE
+except AttributeError:
+ READ_CHUNKSIZE = 65536
+
+
class GlanceFileRead(object):
    """File-like wrapper over the chunk iterator returned by glance."""

    def __init__(self, glance_read_iter):
        self.glance_read_iter = glance_read_iter
        self.iter = self.get_next()

    def read(self, chunk_size):
        """Return the next chunk, or "" once the iterator is exhausted.

        ``chunk_size`` is ignored: the glance ImageBodyIterator yields
        chunks of its own CHUNKSIZE.
        """
        try:
            # NOTE(fix): the builtin next() (available since Python 2.6)
            # replaces the Python-2-only .next() method for forward
            # compatibility; behaviour is identical.
            return next(self.iter)
        except StopIteration:
            return ""

    def get_next(self):
        """Yield successive chunks from the underlying glance iterator."""
        for data in self.glance_read_iter:
            yield data

    def close(self):
        """A dummy close just to maintain consistency."""
        pass
+
+
class VMwareHTTPFile(object):
    """Base class for HTTP file.

    Wraps a connection/file handle and provides eof bookkeeping plus
    best-effort cleanup; subclasses implement read/write/get_size.
    """

    def __init__(self, file_handle):
        # eof is maintained by callers through set_eof/get_eof.
        self.eof = False
        self.file_handle = file_handle

    def set_eof(self, eof):
        """Set the end of file marker."""
        self.eof = eof

    def get_eof(self):
        """Check if the end of file has been reached."""
        return self.eof

    def close(self):
        """Close the file handle."""
        try:
            self.file_handle.close()
        except Exception, exc:
            # Best-effort close: log and swallow so teardown never raises.
            LOG.exception(exc)

    def __del__(self):
        """Close the file handle on garbage collection."""
        self.close()

    def _build_vim_cookie_headers(self, vim_cookies):
        """Build ESX host session cookie headers.

        NOTE(review): only the first cookie in the jar is used (the loop
        breaks after one iteration) -- presumably the vmware session
        cookie always comes first; confirm against the vim client.
        """
        cookie_header = ""
        for vim_cookie in vim_cookies:
            cookie_header = vim_cookie.name + "=" + vim_cookie.value
            break
        return cookie_header

    def write(self, data):
        """Write data to the file."""
        raise NotImplementedError

    def read(self, chunk_size):
        """Read a chunk of data."""
        raise NotImplementedError

    def get_size(self):
        """Get size of the file to be read."""
        raise NotImplementedError
+
+
class VMWareHTTPWriteFile(VMwareHTTPFile):
    """Write handler that PUTs a file into an ESX datastore over HTTP(S).

    The datastore file lives at <scheme>://<host>/folder/<file_path> with
    the datacenter and datastore supplied as query parameters.
    """

    def __init__(self, host, data_center_name, datastore_name, cookies,
                 file_path, file_size, scheme="https"):
        base_url = "%s://%s/folder/%s" % (scheme, host, file_path)
        param_list = {"dcPath": data_center_name, "dsName": datastore_name}
        base_url = base_url + "?" + urllib.urlencode(param_list)
        (scheme, netloc, path, params, query, fragment) = \
            urlparse.urlparse(base_url)
        if scheme == "http":
            conn = httplib.HTTPConnection(netloc)
        elif scheme == "https":
            conn = httplib.HTTPSConnection(netloc)
        else:
            # NOTE(fix): an unsupported scheme previously crashed later
            # with a confusing NameError on `conn`; fail fast instead.
            raise ValueError(_("Unsupported scheme: %s") % scheme)
        conn.putrequest("PUT", path + "?" + query)
        conn.putheader("User-Agent", USER_AGENT)
        conn.putheader("Content-Length", file_size)
        conn.putheader("Cookie", self._build_vim_cookie_headers(cookies))
        conn.endheaders()
        self.conn = conn
        VMwareHTTPFile.__init__(self, conn)

    def write(self, data):
        """Send a chunk of the file body over the open connection."""
        self.file_handle.send(data)

    def close(self):
        """Drain the HTTP response, then close the underlying connection."""
        try:
            self.conn.getresponse()
        except Exception as excep:
            LOG.debug(_("Exception during HTTP connection close in "
                        "VMWareHTTpWrite. Exception is %s") % excep)
        super(VMWareHTTPWriteFile, self).close()
+
+
class VmWareHTTPReadFile(VMwareHTTPFile):
    """VMWare file read handler class.

    Opens the datastore file at <scheme>://<host>/folder/<file_path>
    (datacenter/datastore go in as query parameters) and streams it.
    """

    def __init__(self, host, data_center_name, datastore_name, cookies,
                 file_path, scheme="https"):
        base_url = "%s://%s/folder/%s" % (scheme, host,
                                          urllib.pathname2url(file_path))
        param_list = {"dcPath": data_center_name, "dsName": datastore_name}
        base_url = base_url + "?" + urllib.urlencode(param_list)
        # The ESX session cookie authenticates the download.
        headers = {'User-Agent': USER_AGENT,
                   'Cookie': self._build_vim_cookie_headers(cookies)}
        request = urllib2.Request(base_url, None, headers)
        conn = urllib2.urlopen(request)
        VMwareHTTPFile.__init__(self, conn)

    def read(self, chunk_size):
        """Read a chunk of data."""
        # We are ignoring the chunk size passed for we want the pipe to hold
        # data items of the chunk-size that Glance Client uses for read
        # while writing.
        return self.file_handle.read(READ_CHUNKSIZE)

    def get_size(self):
        """Get size of the file to be read."""
        # -1 signals "unknown length" when the header is absent.
        return self.file_handle.headers.get("Content-Length", -1)
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py new file mode 100644 index 000000000..ba14f1512 --- /dev/null +++ b/nova/virt/vmwareapi/vim.py @@ -0,0 +1,176 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Classes for making VMware VI SOAP calls.
+"""
+
+import httplib
+
+from suds import WebFault
+from suds.client import Client
+from suds.plugin import MessagePlugin
+from suds.sudsobject import Property
+
+from nova import flags
+from nova.virt.vmwareapi import error_util
+
+RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
+CONN_ABORT_ERROR = 'Software caused connection abort'
+ADDRESS_IN_USE_ERROR = 'Address already in use'
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vmwareapi_wsdl_loc',
+ None,
+ 'VIM Service WSDL Location'
+ 'e.g http://<server>/vimService.wsdl'
+ 'Due to a bug in vSphere ESX 4.1 default wsdl'
+ 'Refer readme-vmware to setup')
+
+
class VIMMessagePlugin(MessagePlugin):
    """suds plugin that fixes up outgoing SOAP envelopes for the VI SDK."""

    def addAttributeForValue(self, node):
        # suds does not handle AnyType properly.
        # VI SDK requires type attribute to be set when AnyType is used
        if node.name == 'value':
            node.set('xsi:type', 'xsd:string')

    def marshalled(self, context):
        """suds will send the specified soap envelope.
        Provides the plugin with the opportunity to prune empty
        nodes and fixup nodes before sending it to the server.
        """
        # suds builds the entire request object based on the wsdl schema.
        # VI SDK throws server errors if optional SOAP nodes are sent without
        # values, e.g. <test/> as opposed to <test>test</test>
        context.envelope.prune()
        context.envelope.walk(self.addAttributeForValue)
+
+
class Vim:
    """The VIM Object.

    Thin dynamic wrapper over a suds SOAP client: any attribute access
    that is not a real attribute yields a handler that performs the
    corresponding VI SDK call and normalizes its failure modes.
    """

    def __init__(self,
                 protocol="https",
                 host="localhost"):
        """
        Creates the necessary Communication interfaces and gets the
        ServiceContent for initiating SOAP transactions.

        protocol: http or https
        host    : ESX IPAddress[:port] or ESX Hostname[:port]
        """
        self._protocol = protocol
        self._host_name = host
        wsdl_url = FLAGS.vmwareapi_wsdl_loc
        if wsdl_url is None:
            raise Exception(_("Must specify vmwareapi_wsdl_loc"))
        # TODO(sateesh): Use this when VMware fixes their faulty wsdl
        #wsdl_url = '%s://%s/sdk/vimService.wsdl' % (self._protocol,
        #                                            self._host_name)
        url = '%s://%s/sdk' % (self._protocol, self._host_name)
        self.client = Client(wsdl_url, location=url,
                             plugins=[VIMMessagePlugin()])
        self._service_content = \
            self.RetrieveServiceContent("ServiceInstance")

    def get_service_content(self):
        """Gets the service content object."""
        return self._service_content

    def __getattr__(self, attr_name):
        """Makes the API calls and gets the result."""
        try:
            # NOTE(review): object defines no __getattr__, so this lookup
            # always raises AttributeError and we always fall through to
            # the dynamic handler; real instance attributes are resolved
            # before __getattr__ is invoked, so this is harmless.
            return object.__getattr__(self, attr_name)
        except AttributeError:

            def vim_request_handler(managed_object, **kwargs):
                """
                Builds the SOAP message and parses the response for fault
                checking and other errors.

                managed_object : Managed Object Reference or Managed
                                 Object Name
                **kwargs : Keyword arguments of the call
                """
                # Dynamic handler for VI SDK Calls
                try:
                    request_mo = \
                        self._request_managed_object_builder(managed_object)
                    request = getattr(self.client.service, attr_name)
                    response = request(request_mo, **kwargs)
                    # To check for the faults that are part of the message
                    # body and not returned as Fault object response from
                    # the ESX SOAP server
                    if hasattr(error_util.FaultCheckers,
                               attr_name.lower() + "_fault_checker"):
                        fault_checker = getattr(error_util.FaultCheckers,
                                        attr_name.lower() + "_fault_checker")
                        fault_checker(response)
                    return response
                # Catch the VimFaultException that is raised by the fault
                # check of the SOAP response
                except error_util.VimFaultException as excep:
                    raise
                except WebFault as excep:
                    doc = excep.document
                    detail = doc.childAtPath("/Envelope/Body/Fault/detail")
                    fault_list = []
                    for child in detail.getChildren():
                        fault_list.append(child.get("type"))
                    raise error_util.VimFaultException(fault_list, excep)
                except AttributeError as excep:
                    raise error_util.VimAttributeError(_("No such SOAP method "
                        "'%s' provided by VI SDK") % (attr_name), excep)
                except (httplib.CannotSendRequest,
                        httplib.ResponseNotReady,
                        httplib.CannotSendHeader) as excep:
                    raise error_util.SessionOverLoadException(_("httplib "
                        "error in %s: ") % (attr_name), excep)
                except Exception as excep:
                    # Socket errors which need special handling for they
                    # might be caused by ESX API call overload
                    # NOTE(fix): the original applied `!= -1` to the whole
                    # `or` expression instead of to the second find(); it
                    # evaluated correctly only by accident.
                    if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                            str(excep).find(CONN_ABORT_ERROR) != -1):
                        raise error_util.SessionOverLoadException(_("Socket "
                            "error in %s: ") % (attr_name), excep)
                    # Type error that needs special handling for it might be
                    # caused by ESX host API call overload
                    elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
                        raise error_util.SessionOverLoadException(_("Type "
                            "error in %s: ") % (attr_name), excep)
                    else:
                        raise error_util.VimException(
                            _("Exception in %s ") % (attr_name), excep)
            return vim_request_handler

    def _request_managed_object_builder(self, managed_object):
        """Builds the request managed object."""
        # A bare string is turned into a suds Property whose _type matches
        # the name, which the VI SDK accepts as a managed object reference.
        if type(managed_object) == type(""):
            mo = Property(managed_object)
            mo._type = managed_object
        else:
            mo = managed_object
        return mo

    def __repr__(self):
        return "VIM Object"

    def __str__(self):
        return "VIM Object"
diff --git a/nova/virt/vmwareapi/vim_util.py b/nova/virt/vmwareapi/vim_util.py new file mode 100644 index 000000000..11214231c --- /dev/null +++ b/nova/virt/vmwareapi/vim_util.py @@ -0,0 +1,217 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The VMware API utility module.
+"""
+
+
def build_selection_spec(client_factory, name):
    """Create a SelectionSpec carrying the given name."""
    spec = client_factory.create('ns0:SelectionSpec')
    spec.name = name
    return spec
+
+
def build_traversal_spec(client_factory, name, spec_type, path, skip,
                         select_set):
    """Create a TraversalSpec describing one step of object traversal."""
    spec = client_factory.create('ns0:TraversalSpec')
    spec.name = name
    spec.type = spec_type
    spec.path = path
    spec.skip = skip
    spec.selectSet = select_set
    return spec
+
+
def build_recursive_traversal_spec(client_factory):
    """
    Builds the Recursive Traversal Spec to traverse the object managed
    object hierarchy.

    The returned "visitFolders" spec walks Folder.childEntity and fans
    out into datacenters, hosts, datastores, resource pools and VMs;
    the selection specs below name sibling traversal specs so the walk
    recurses.
    """
    visit_folders_select_spec = build_selection_spec(client_factory,
                                                     "visitFolders")
    # For getting to hostFolder from datacenter
    dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
                                    "hostFolder", False,
                                    [visit_folders_select_spec])
    # For getting to vmFolder from datacenter
    dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
                                     "vmFolder", False,
                                     [visit_folders_select_spec])
    # For getting Host System to virtual machine
    h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
                                   "vm", False,
                                   [visit_folders_select_spec])

    # For getting to Host System from Compute Resource
    cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
                                   "ComputeResource", "host", False, [])

    # For getting to datastore from Compute Resource
    cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
                                    "ComputeResource", "datastore", False, [])

    rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
    rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
    # For getting to resource pool from Compute Resource
    cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
                                "ComputeResource", "resourcePool", False,
                                [rp_to_rp_select_spec, rp_to_vm_select_spec])

    # For getting to child res pool from the parent res pool
    rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
                                "resourcePool", False,
                                [rp_to_rp_select_spec, rp_to_vm_select_spec])

    # For getting to Virtual Machine from the Resource Pool
    rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
                                "vm", False,
                                [rp_to_rp_select_spec, rp_to_vm_select_spec])

    # Get the assorted traversal spec which takes care of the objects to
    # be searched for from the root folder
    traversal_spec = build_traversal_spec(client_factory, "visitFolders",
                                  "Folder", "childEntity", False,
                                  [visit_folders_select_spec, dc_to_hf,
                                   dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,
                                   rp_to_rp, h_to_vm, rp_to_vm])
    return traversal_spec
+
+
def build_property_spec(client_factory, type="VirtualMachine",
                        properties_to_collect=None,
                        all_properties=False):
    """Builds the Property Spec.

    ``properties_to_collect`` defaults to ["name"].  NOTE(fix): the
    default is now created per call instead of being a shared mutable
    list (classic mutable-default-argument pitfall); behaviour is
    unchanged for callers.  The ``type`` parameter name shadows the
    builtin but is kept because callers pass it by keyword.
    """
    if properties_to_collect is None:
        properties_to_collect = ["name"]
    property_spec = client_factory.create('ns0:PropertySpec')
    property_spec.all = all_properties
    property_spec.pathSet = properties_to_collect
    property_spec.type = type
    return property_spec
+
+
def build_object_spec(client_factory, root_folder, traversal_specs):
    """Create an ObjectSpec rooted at ``root_folder``."""
    spec = client_factory.create('ns0:ObjectSpec')
    spec.obj = root_folder
    # Collect from the root object itself as well, not only its children.
    spec.skip = False
    spec.selectSet = traversal_specs
    return spec
+
+
def build_property_filter_spec(client_factory, property_specs, object_specs):
    """Create a PropertyFilterSpec from property and object specs."""
    pf_spec = client_factory.create('ns0:PropertyFilterSpec')
    pf_spec.propSet = property_specs
    pf_spec.objectSet = object_specs
    return pf_spec
+
+
def get_object_properties(vim, collector, mobj, type, properties):
    """Gets the properties of the Managed object specified.

    Returns None when ``mobj`` is None.  Falls back to the service
    content's default PropertyCollector when ``collector`` is None.
    When ``properties`` is None or empty, all properties are collected.
    """
    client_factory = vim.client.factory
    if mobj is None:
        return None
    usecoll = collector
    if usecoll is None:
        usecoll = vim.get_service_content().propertyCollector
    property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
    property_spec = client_factory.create('ns0:PropertySpec')
    # Collect everything when no explicit property list was given.
    property_spec.all = (properties is None or len(properties) == 0)
    property_spec.pathSet = properties
    property_spec.type = type
    object_spec = client_factory.create('ns0:ObjectSpec')
    object_spec.obj = mobj
    object_spec.skip = False
    property_filter_spec.propSet = [property_spec]
    property_filter_spec.objectSet = [object_spec]
    return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])
+
+
def get_dynamic_property(vim, mobj, type, property_name):
    """Fetch a single property value of a managed object, or None."""
    obj_content = \
        get_object_properties(vim, None, mobj, type, [property_name])
    if not obj_content:
        return None
    prop_set = obj_content[0].propSet
    if not prop_set:
        return None
    return prop_set[0].val
+
+
def get_objects(vim, type, properties_to_collect=None, all=False):
    """Gets the list of objects of the type specified.

    ``properties_to_collect`` defaults to ["name"].  NOTE(fix): the
    default is built per call rather than sharing one module-level
    mutable list.  ``type`` and ``all`` shadow builtins but are kept for
    interface compatibility with existing callers.
    """
    if properties_to_collect is None:
        properties_to_collect = ["name"]
    client_factory = vim.client.factory
    object_spec = build_object_spec(client_factory,
                    vim.get_service_content().rootFolder,
                    [build_recursive_traversal_spec(client_factory)])
    property_spec = build_property_spec(client_factory, type=type,
                    properties_to_collect=properties_to_collect,
                    all_properties=all)
    property_filter_spec = build_property_filter_spec(client_factory,
                                [property_spec],
                                [object_spec])
    return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
                specSet=[property_filter_spec])
+
+
def get_prop_spec(client_factory, spec_type, properties):
    """Create a PropertySpec for ``spec_type`` collecting ``properties``."""
    spec = client_factory.create('ns0:PropertySpec')
    spec.type = spec_type
    spec.pathSet = properties
    return spec
+
+
def get_obj_spec(client_factory, obj, select_set=None):
    """Create an ObjectSpec for ``obj``; selectSet is only set when given."""
    spec = client_factory.create('ns0:ObjectSpec')
    spec.obj = obj
    spec.skip = False
    if select_set is not None:
        spec.selectSet = select_set
    return spec
+
+
def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
    """Create a PropertyFilterSpec tying object specs to property specs."""
    spec = client_factory.create('ns0:PropertyFilterSpec')
    spec.propSet = prop_spec
    spec.objectSet = obj_spec
    return spec
+
+
def get_properties_for_a_collection_of_objects(vim, type,
                                               obj_list, properties):
    """
    Gets the list of properties for the collection of
    objects of the type specified.
    """
    client_factory = vim.client.factory
    if not obj_list:
        return []
    prop_spec = get_prop_spec(client_factory, type, properties)
    obj_specs = [get_obj_spec(client_factory, obj) for obj in obj_list]
    filter_spec = get_prop_filter_spec(client_factory,
                                       obj_specs, [prop_spec])
    return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
                                  specSet=[filter_spec])
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py new file mode 100644 index 000000000..a2fa7600c --- /dev/null +++ b/nova/virt/vmwareapi/vm_util.py @@ -0,0 +1,306 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The VMware API VM utility module to build SOAP object specs.
+"""
+
+
def build_datastore_path(datastore_name, path):
    """Return a datastore-style path, i.e. "[<datastore>] <path>"."""
    return "[{0}] {1}".format(datastore_name, path)
+
+
def split_datastore_path(datastore_path):
    """
    Split a VMware-style datastore path ("[ds] relative/path") into
    the datastore name and the (whitespace-stripped) entity path.
    """
    remainder = datastore_path.split('[', 1)[1]
    parts = remainder.split(']', 1)
    if len(parts) == 1:
        # No closing bracket: the whole remainder is the datastore name.
        datastore_url, path = parts[0], ""
    else:
        datastore_url, path = parts
    return datastore_url, path.strip()
+
+
def get_vm_create_spec(client_factory, instance, data_store_name,
                       network_name="vmnet0",
                       os_type="otherGuest"):
    """Builds the VM Create spec."""
    spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    spec.name = instance.name
    spec.guestId = os_type

    # The VM's files live at the root of the chosen datastore.
    file_info = client_factory.create('ns0:VirtualMachineFileInfo')
    file_info.vmPathName = "[" + data_store_name + "]"
    spec.files = file_info

    # Enable VMware Tools hooks for all lifecycle transitions.
    tools_info = client_factory.create('ns0:ToolsConfigInfo')
    for hook in ("afterPowerOn", "afterResume", "beforeGuestStandby",
                 "beforeGuestShutdown", "beforeGuestReboot"):
        setattr(tools_info, hook, True)
    spec.tools = tools_info

    spec.numCPUs = int(instance.vcpus)
    spec.memoryMB = int(instance.memory_mb)

    nic_spec = create_network_spec(client_factory, network_name,
                                   instance.mac_address)
    spec.deviceChange = [nic_spec]
    return spec
+
+
def create_controller_spec(client_factory, key):
    """
    Build a VirtualDeviceConfigSpec that adds an LSI Logic controller;
    it acts as the controller for the virtual hard disk attached to
    the VM.
    """
    device_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    device_spec.operation = "add"

    controller = client_factory.create('ns0:VirtualLsiLogicController')
    controller.key = key
    controller.busNumber = 0
    controller.sharedBus = "noSharing"

    device_spec.device = controller
    return device_spec
+
+
def create_network_spec(client_factory, network_name, mac_address):
    """
    Build a VirtualDeviceConfigSpec adding a PCNet32 network adapter
    connected to *network_name* with a manually assigned MAC address.
    """
    network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    network_spec.operation = "add"

    # Get the recommended card type for the VM based on the guest OS of the VM
    adapter = client_factory.create('ns0:VirtualPCNet32')

    backing = client_factory.create(
        'ns0:VirtualEthernetCardNetworkBackingInfo')
    backing.deviceName = network_name

    connect_info = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connect_info.startConnected = True
    connect_info.allowGuestControl = True
    connect_info.connected = True

    adapter.connectable = connect_info
    adapter.backing = backing

    # The server assigns the real (positive) key to the device; we pass
    # a temporary negative key so we cannot clash with whatever key the
    # server associates with the device.
    adapter.key = -47
    adapter.addressType = "manual"
    adapter.macAddress = mac_address
    adapter.wakeOnLanEnabled = True

    network_spec.device = adapter
    return network_spec
+
+
def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
                                adapter_type="lsiLogic"):
    """Builds the vmdk attach config spec."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')

    # The controller key ties the disk to its controller. IDE VMs ship
    # with two default controllers keyed 200 and 201; for every other
    # adapter type we add a controller with a temporary negative key.
    if adapter_type == "ide":
        controller_key = 200
    else:
        controller_key = -101

    config_spec.deviceChange = [
        create_controller_spec(client_factory, controller_key),
        create_virtual_disk_spec(client_factory, disksize,
                                 controller_key, file_path)]
    return config_spec
+
+
def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices):
    """Gets the vmdk file path and the storage adapter type.

    Scans the VM's hardware device list for the flat-file-backed
    virtual disk and maps its controller to an adapter-type string
    ("lsiLogic", "busLogic" or "ide"; "" when no match is found).
    """
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    controller_adapter_types = {
        "VirtualLsiLogicController": "lsiLogic",
        "VirtualBusLogicController": "busLogic",
        "VirtualIDEController": "ide",
        "VirtualLsiLogicSASController": "lsiLogic"}

    vmdk_file_path = None
    vmdk_controller_key = None
    adapter_type_dict = {}
    for device in hardware_devices:
        device_cls = device.__class__.__name__
        if (device_cls == "VirtualDisk" and
            device.backing.__class__.__name__ ==
                "VirtualDiskFlatVer2BackingInfo"):
            vmdk_file_path = device.backing.fileName
            vmdk_controller_key = device.controllerKey
        elif device_cls in controller_adapter_types:
            adapter_type_dict[device.key] = \
                controller_adapter_types[device_cls]

    return vmdk_file_path, adapter_type_dict.get(vmdk_controller_key, "")
+
+
def get_copy_virtual_disk_spec(client_factory, adapter_type="lsilogic"):
    """Builds the Virtual Disk copy spec (thick-provisioned)."""
    copy_spec = client_factory.create('ns0:VirtualDiskSpec')
    copy_spec.adapterType = adapter_type
    copy_spec.diskType = "thick"
    return copy_spec
+
+
def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic"):
    """Builds the virtual disk create spec (thick-provisioned)."""
    create_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
    create_spec.adapterType = adapter_type
    create_spec.diskType = "thick"
    create_spec.capacityKb = size_in_kb
    return create_spec
+
+
def create_virtual_disk_spec(client_factory, disksize, controller_key,
                             file_path=None):
    """
    Build a VirtualDeviceConfigSpec that creates a brand-new virtual
    disk (when *file_path* is None) or attaches an already existing
    one to the VM.
    """
    device_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
    device_spec.operation = "add"
    if file_path is None:
        # No backing file supplied: ask the server to create the disk.
        device_spec.fileOperation = "create"

    virtual_disk = client_factory.create('ns0:VirtualDisk')

    backing = client_factory.create('ns0:VirtualDiskFlatVer2BackingInfo')
    backing.diskMode = "persistent"
    backing.thinProvisioned = False
    backing.fileName = file_path if file_path is not None else ""

    connect_info = client_factory.create('ns0:VirtualDeviceConnectInfo')
    connect_info.startConnected = True
    connect_info.allowGuestControl = False
    connect_info.connected = True

    virtual_disk.backing = backing
    virtual_disk.connectable = connect_info

    # The server assigns the real (positive) key to the device; pass a
    # negative one so we cannot clash with a key the server might pick.
    virtual_disk.key = -100
    virtual_disk.controllerKey = controller_key
    virtual_disk.unitNumber = 0
    virtual_disk.capacityInKB = disksize

    device_spec.device = virtual_disk

    return device_spec
+
+
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
    """Builds the dummy VM create spec (1 vCPU, 4 MB, 1 MB disk)."""
    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.name = name
    config_spec.guestId = "otherGuest"

    file_info = client_factory.create('ns0:VirtualMachineFileInfo')
    file_info.vmPathName = "[" + data_store_name + "]"
    config_spec.files = file_info

    tools_info = client_factory.create('ns0:ToolsConfigInfo')
    for hook in ("afterPowerOn", "afterResume", "beforeGuestStandby",
                 "beforeGuestShutdown", "beforeGuestReboot"):
        setattr(tools_info, hook, True)
    config_spec.tools = tools_info

    config_spec.numCPUs = 1
    config_spec.memoryMB = 4

    controller_key = -101
    config_spec.deviceChange = [
        create_controller_spec(client_factory, controller_key),
        create_virtual_disk_spec(client_factory, 1024, controller_key)]
    return config_spec
+
+
def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask, gateway):
    """Builds the machine id change config spec.

    The guest reads networking parameters from the "machine.id" extra
    config option, encoded as "mac;ip;netmask;gateway".
    """
    machine_id_str = ";".join(
        str(part) for part in (mac, ip_addr, netmask, gateway))

    opt = client_factory.create('ns0:OptionValue')
    opt.key = "machine.id"
    opt.value = machine_id_str

    config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
    config_spec.extraConfig = [opt]
    return config_spec
+
+
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
                                    port_group_name, vlan_id):
    """Builds the virtual switch port group add spec."""
    pg_spec = client_factory.create('ns0:HostPortGroupSpec')
    pg_spec.name = port_group_name
    pg_spec.vswitchName = vswitch_name

    # VLAN ID of 0 means that VLAN tagging is not to be done for the network.
    pg_spec.vlanId = int(vlan_id)

    nic_teaming = client_factory.create('ns0:HostNicTeamingPolicy')
    nic_teaming.notifySwitches = True

    policy = client_factory.create('ns0:HostNetworkPolicy')
    policy.nicTeaming = nic_teaming

    pg_spec.policy = policy
    return pg_spec
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py new file mode 100644 index 000000000..cf6c88bbd --- /dev/null +++ b/nova/virt/vmwareapi/vmops.py @@ -0,0 +1,789 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Class for VM tasks like spawn, snapshot, suspend, resume etc.
+"""
+
+import base64
+import os
+import time
+import urllib
+import urllib2
+import uuid
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.compute import power_state
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmware_images
+from nova.virt.vmwareapi import network_utils
+
FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.virt.vmwareapi.vmops")

# Map of vSphere VirtualMachinePowerState values to nova power states.
VMWARE_POWER_STATES = {
                   'poweredOff': power_state.SHUTDOWN,
                    'poweredOn': power_state.RUNNING,
                    'suspended': power_state.PAUSED}
+
+
+class VMWareVMOps(object):
+ """Management class for VM-related tasks."""
+
    def __init__(self, session):
        """Initializer.

        :param session: API session object used for all vSphere SOAP
                        calls made by this instance.
        """
        self._session = session
+
+ def _wait_with_callback(self, instance_id, task, callback):
+ """Waits for the task to finish and does a callback after."""
+ ret = None
+ try:
+ ret = self._session._wait_for_task(instance_id, task)
+ except Exception, excep:
+ LOG.exception(excep)
+ callback(ret)
+
+ def list_instances(self):
+ """Lists the VM instances that are registered with the ESX host."""
+ LOG.debug(_("Getting list of instances"))
+ vms = self._session._call_method(vim_util, "get_objects",
+ "VirtualMachine",
+ ["name", "runtime.connectionState"])
+ lst_vm_names = []
+ for vm in vms:
+ vm_name = None
+ conn_state = None
+ for prop in vm.propSet:
+ if prop.name == "name":
+ vm_name = prop.val
+ elif prop.name == "runtime.connectionState":
+ conn_state = prop.val
+ # Ignoring the oprhaned or inaccessible VMs
+ if conn_state not in ["orphaned", "inaccessible"]:
+ lst_vm_names.append(vm_name)
+ LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
+ return lst_vm_names
+
+ def spawn(self, instance):
+ """
+ Creates a VM instance.
+
+ Steps followed are:
+ 1. Create a VM with no disk and the specifics in the instance object
+ like RAM size.
+ 2. Create a dummy vmdk of the size of the disk file that is to be
+ uploaded. This is required just to create the metadata file.
+ 3. Delete the -flat.vmdk file created in the above step and retain
+ the metadata .vmdk file.
+ 4. Upload the disk file.
+ 5. Attach the disk to the VM by reconfiguring the same.
+ 6. Power on the VM.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref:
+ raise exception.Duplicate(_("Attempted to create a VM with a name"
+ " %s, but that already exists on the host") % instance.name)
+
+ client_factory = self._session._get_vim().client.factory
+ service_content = self._session._get_vim().get_service_content()
+
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+
+ net_name = network['bridge']
+
+ def _check_if_network_bridge_exists():
+ network_ref = \
+ network_utils.get_network_with_the_name(self._session,
+ net_name)
+ if network_ref is None:
+ raise exception.NotFound(_("Network with the name '%s' doesn't"
+ " exist on the ESX host") % net_name)
+
+ _check_if_network_bridge_exists()
+
+ def _get_datastore_ref():
+ """Get the datastore list and choose the first local storage."""
+ data_stores = self._session._call_method(vim_util, "get_objects",
+ "Datastore", ["summary.type", "summary.name"])
+ for elem in data_stores:
+ ds_name = None
+ ds_type = None
+ for prop in elem.propSet:
+ if prop.name == "summary.type":
+ ds_type = prop.val
+ elif prop.name == "summary.name":
+ ds_name = prop.val
+ # Local storage identifier
+ if ds_type == "VMFS":
+ data_store_name = ds_name
+ return data_store_name
+
+ if data_store_name is None:
+ msg = _("Couldn't get a local Datastore reference")
+ LOG.exception(msg)
+ raise exception.Error(msg)
+
+ data_store_name = _get_datastore_ref()
+
+ def _get_image_properties():
+ """
+ Get the Size of the flat vmdk file that is there on the storage
+ repository.
+ """
+ image_size, image_properties = \
+ vmware_images.get_vmdk_size_and_properties(
+ instance.image_id, instance)
+ vmdk_file_size_in_kb = int(image_size) / 1024
+ os_type = image_properties.get("vmware_ostype", "otherGuest")
+ adapter_type = image_properties.get("vmware_adaptertype",
+ "lsiLogic")
+ return vmdk_file_size_in_kb, os_type, adapter_type
+
+ vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()
+
+ def _get_vmfolder_and_res_pool_mors():
+ """Get the Vm folder ref from the datacenter."""
+ dc_objs = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["vmFolder"])
+ # There is only one default datacenter in a standalone ESX host
+ vm_folder_mor = dc_objs[0].propSet[0].val
+
+ # Get the resource pool. Taking the first resource pool coming our
+ # way. Assuming that is the default resource pool.
+ res_pool_mor = self._session._call_method(vim_util, "get_objects",
+ "ResourcePool")[0].obj
+ return vm_folder_mor, res_pool_mor
+
+ vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
+
+ # Get the create vm config spec
+ config_spec = vm_util.get_vm_create_spec(client_factory, instance,
+ data_store_name, net_name, os_type)
+
+ def _execute_create_vm():
+ """Create VM on ESX host."""
+ LOG.debug(_("Creating VM with the name %s on the ESX host") %
+ instance.name)
+ # Create the VM on the ESX host
+ vm_create_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateVM_Task", vm_folder_mor,
+ config=config_spec, pool=res_pool_mor)
+ self._session._wait_for_task(instance.id, vm_create_task)
+
+ LOG.debug(_("Created VM with the name %s on the ESX host") %
+ instance.name)
+
+ _execute_create_vm()
+
+ # Set the machine id for the VM for setting the IP
+ self._set_machine_id(client_factory, instance)
+
+ # Naming the VM files in correspondence with the VM instance name
+ # The flat vmdk file name
+ flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
+ instance.name)
+ # The vmdk meta-data file
+ uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
+ flat_uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ flat_uploaded_vmdk_name)
+ uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ uploaded_vmdk_name)
+
+ def _create_virtual_disk():
+ """Create a virtual disk of the size of flat vmdk file."""
+ # Create a Virtual Disk of the size of the flat vmdk file. This is
+ # done just to generate the meta-data file whose specifics
+ # depend on the size of the disk, thin/thick provisioning and the
+ # storage adapter type.
+ # Here we assume thick provisioning and lsiLogic for the adapter
+ # type
+ LOG.debug(_("Creating Virtual Disk of size "
+ "%(vmdk_file_size_in_kb)s KB and adapter type "
+ "%(adapter_type)s on the ESX host local store"
+ " %(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "adapter_type": adapter_type,
+ "data_store_name": data_store_name})
+ vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
+ vmdk_file_size_in_kb, adapter_type)
+ vmdk_create_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ name=uploaded_vmdk_path,
+ datacenter=self._get_datacenter_name_and_ref()[0],
+ spec=vmdk_create_spec)
+ self._session._wait_for_task(instance.id, vmdk_create_task)
+ LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
+ " KB on the ESX host local store "
+ "%(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "data_store_name": data_store_name})
+
+ _create_virtual_disk()
+
+ def _delete_disk_file():
+ LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
+ "on the ESX host local"
+ "store %(data_store_name)s") %
+ {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ "data_store_name": data_store_name})
+ # Delete the -flat.vmdk file created. .vmdk file is retained.
+ vmdk_delete_task = self._session._call_method(
+ self._session._get_vim(),
+ "DeleteDatastoreFile_Task",
+ service_content.fileManager,
+ name=flat_uploaded_vmdk_path)
+ self._session._wait_for_task(instance.id, vmdk_delete_task)
+ LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
+ "ESX host local store %(data_store_name)s") %
+ {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ "data_store_name": data_store_name})
+
+ _delete_disk_file()
+
+ cookies = self._session._get_vim().client.options.transport.cookiejar
+
+ def _fetch_image_on_esx_datastore():
+ """Fetch image from Glance to ESX datastore."""
+ LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
+ "data store %(data_store_name)s") %
+ ({'image_id': instance.image_id,
+ 'data_store_name': data_store_name}))
+ # Upload the -flat.vmdk file whose meta-data file we just created
+ # above
+ vmware_images.fetch_image(
+ instance.image_id,
+ instance,
+ host=self._session._host_ip,
+ data_center_name=self._get_datacenter_name_and_ref()[1],
+ datastore_name=data_store_name,
+ cookies=cookies,
+ file_path=flat_uploaded_vmdk_name)
+ LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
+ "data store %(data_store_name)s") %
+ ({'image_id': instance.image_id,
+ 'data_store_name': data_store_name}))
+ _fetch_image_on_esx_datastore()
+
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+
+ def _attach_vmdk_to_the_vm():
+ """
+ Attach the vmdk uploaded to the VM. VM reconfigure is done
+ to do so.
+ """
+ vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
+ client_factory,
+ vmdk_file_size_in_kb, uploaded_vmdk_path,
+ adapter_type)
+ LOG.debug(_("Reconfiguring VM instance %s to attach the image "
+ "disk") % instance.name)
+ reconfig_task = self._session._call_method(
+ self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vmdk_attach_config_spec)
+ self._session._wait_for_task(instance.id, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %s to attach the image "
+ "disk") % instance.name)
+
+ _attach_vmdk_to_the_vm()
+
+ def _power_on_vm():
+ """Power on the VM."""
+ LOG.debug(_("Powering on the VM instance %s") % instance.name)
+ # Power On the VM
+ power_on_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, power_on_task)
+ LOG.debug(_("Powered on the VM instance %s") % instance.name)
+ _power_on_vm()
+
    def snapshot(self, instance, snapshot_name):
        """
        Create snapshot from a running VM instance.
        Steps followed are:
        1. Get the name of the vmdk file which the VM points to right now.
           Can be a chain of snapshots, so we need to know the last in the
           chain.
        2. Create the snapshot. A new vmdk is created which the VM points to
           now. The earlier vmdk becomes read-only.
        3. Call CopyVirtualDisk which coalesces the disk chain to form a single
           vmdk, rather a .vmdk metadata file and a -flat.vmdk disk data file.
        4. Now upload the -flat.vmdk file to the image store.
        5. Delete the coalesced .vmdk and -flat.vmdk created.

        :param instance: instance whose VM is snapshotted; must already
                         exist on the host, else exception.NotFound.
        :param snapshot_name: name under which the image is uploaded.
        """
        vm_ref = self._get_vm_ref_from_the_name(instance.name)
        if vm_ref is None:
            raise exception.NotFound(_("instance - %s not present") %
                                     instance.name)

        client_factory = self._session._get_vim().client.factory
        service_content = self._session._get_vim().get_service_content()

        def _get_vm_and_vmdk_attribs():
            # Get the vmdk file name that the VM is pointing to
            hardware_devices = self._session._call_method(vim_util,
                        "get_dynamic_property", vm_ref,
                        "VirtualMachine", "config.hardware.device")
            vmdk_file_path_before_snapshot, adapter_type = \
                vm_util.get_vmdk_file_path_and_adapter_type(client_factory,
                                                            hardware_devices)
            datastore_name = vm_util.split_datastore_path(
                                     vmdk_file_path_before_snapshot)[0]
            os_type = self._session._call_method(vim_util,
                        "get_dynamic_property", vm_ref,
                        "VirtualMachine", "summary.config.guestId")
            return (vmdk_file_path_before_snapshot, adapter_type,
                    datastore_name, os_type)

        vmdk_file_path_before_snapshot, adapter_type, datastore_name,\
            os_type = _get_vm_and_vmdk_attribs()

        def _create_vm_snapshot():
            # Create a snapshot of the VM
            LOG.debug(_("Creating Snapshot of the VM instance %s ") %
                      instance.name)
            snapshot_task = self._session._call_method(
                        self._session._get_vim(),
                        "CreateSnapshot_Task", vm_ref,
                        name="%s-snapshot" % instance.name,
                        description="Taking Snapshot of the VM",
                        memory=True,
                        quiesce=True)
            self._session._wait_for_task(instance.id, snapshot_task)
            LOG.debug(_("Created Snapshot of the VM instance %s ") %
                      instance.name)

        _create_vm_snapshot()

        def _check_if_tmp_folder_exists():
            # Copy the contents of the VM that were there just before the
            # snapshot was taken
            ds_ref_ret = vim_util.get_dynamic_property(
                                    self._session._get_vim(),
                                    vm_ref,
                                    "VirtualMachine",
                                    "datastore")
            if not ds_ref_ret:
                raise exception.NotFound(_("Failed to get the datastore "
                                           "reference(s) which the VM uses"))
            # NOTE(review): only the first datastore of the VM is
            # considered here.
            ds_ref = ds_ref_ret.ManagedObjectReference[0]
            ds_browser = vim_util.get_dynamic_property(
                                    self._session._get_vim(),
                                    ds_ref,
                                    "Datastore",
                                    "browser")
            # Check if the vmware-tmp folder exists or not. If not, create one
            tmp_folder_path = vm_util.build_datastore_path(datastore_name,
                                                           "vmware-tmp")
            if not self._path_exists(ds_browser, tmp_folder_path):
                self._mkdir(vm_util.build_datastore_path(datastore_name,
                                                         "vmware-tmp"))

        _check_if_tmp_folder_exists()

        # Generate a random vmdk file name to which the coalesced vmdk content
        # will be copied to. A random name is chosen so that we don't have
        # name clashes.
        random_name = str(uuid.uuid4())
        dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name,
                   "vmware-tmp/%s.vmdk" % random_name)
        dc_ref = self._get_datacenter_name_and_ref()[0]

        def _copy_vmdk_content():
            # Copy the contents of the disk ( or disks, if there were snapshots
            # done earlier) to a temporary vmdk file.
            copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
                                                           adapter_type)
            LOG.debug(_("Copying disk data before snapshot of the VM "
                        " instance %s") % instance.name)
            copy_disk_task = self._session._call_method(
                self._session._get_vim(),
                "CopyVirtualDisk_Task",
                service_content.virtualDiskManager,
                sourceName=vmdk_file_path_before_snapshot,
                sourceDatacenter=dc_ref,
                destName=dest_vmdk_file_location,
                destDatacenter=dc_ref,
                destSpec=copy_spec,
                force=False)
            self._session._wait_for_task(instance.id, copy_disk_task)
            LOG.debug(_("Copied disk data before snapshot of the VM "
                        "instance %s") % instance.name)

        _copy_vmdk_content()

        cookies = self._session._get_vim().client.options.transport.cookiejar

        def _upload_vmdk_to_image_repository():
            # Upload the contents of -flat.vmdk file which has the disk data.
            LOG.debug(_("Uploading image %s") % snapshot_name)
            vmware_images.upload_image(
                snapshot_name,
                instance,
                os_type=os_type,
                adapter_type=adapter_type,
                image_version=1,
                host=self._session._host_ip,
                data_center_name=self._get_datacenter_name_and_ref()[1],
                datastore_name=datastore_name,
                cookies=cookies,
                file_path="vmware-tmp/%s-flat.vmdk" % random_name)
            LOG.debug(_("Uploaded image %s") % snapshot_name)

        _upload_vmdk_to_image_repository()

        def _clean_temp_data():
            """
            Delete temporary vmdk files generated in image handling
            operations.
            """
            # Delete the temporary vmdk created above.
            LOG.debug(_("Deleting temporary vmdk file %s")
                        % dest_vmdk_file_location)
            remove_disk_task = self._session._call_method(
                self._session._get_vim(),
                "DeleteVirtualDisk_Task",
                service_content.virtualDiskManager,
                name=dest_vmdk_file_location,
                datacenter=dc_ref)
            self._session._wait_for_task(instance.id, remove_disk_task)
            LOG.debug(_("Deleted temporary vmdk file %s")
                        % dest_vmdk_file_location)

        _clean_temp_data()
+
+ def reboot(self, instance):
+ """Reboot a VM instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
+ "summary.guest.toolsRunningStatus"]
+ props = self._session._call_method(vim_util, "get_object_properties",
+ None, vm_ref, "VirtualMachine",
+ lst_properties)
+ pwr_state = None
+ tools_status = None
+ tools_running_status = False
+ for elem in props:
+ for prop in elem.propSet:
+ if prop.name == "runtime.powerState":
+ pwr_state = prop.val
+ elif prop.name == "summary.guest.toolsStatus":
+ tools_status = prop.val
+ elif prop.name == "summary.guest.toolsRunningStatus":
+ tools_running_status = prop.val
+
+ # Raise an exception if the VM is not powered On.
+ if pwr_state not in ["poweredOn"]:
+ raise exception.Invalid(_("instance - %s not poweredOn. So can't "
+ "be rebooted.") % instance.name)
+
+ # If latest vmware tools are installed in the VM, and that the tools
+ # are running, then only do a guest reboot. Otherwise do a hard reset.
+ if (tools_status == "toolsOk" and
+ tools_running_status == "guestToolsRunning"):
+ LOG.debug(_("Rebooting guest OS of VM %s") % instance.name)
+ self._session._call_method(self._session._get_vim(), "RebootGuest",
+ vm_ref)
+ LOG.debug(_("Rebooted guest OS of VM %s") % instance.name)
+ else:
+ LOG.debug(_("Doing hard reboot of VM %s") % instance.name)
+ reset_task = self._session._call_method(self._session._get_vim(),
+ "ResetVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, reset_task)
+ LOG.debug(_("Did hard reboot of VM %s") % instance.name)
+
    def destroy(self, instance):
        """
        Destroy a VM instance. Steps followed are:
        1. Power off the VM, if it is in poweredOn state.
        2. Un-register a VM.
        3. Delete the contents of the folder holding the VM related data.

        Best-effort: each step logs and continues on failure instead of
        propagating, so a partially-deleted VM never aborts the teardown.
        """
        try:
            vm_ref = self._get_vm_ref_from_the_name(instance.name)
            if vm_ref is None:
                # Nothing to do if the VM is already gone.
                LOG.debug(_("instance - %s not present") % instance.name)
                return
            lst_properties = ["config.files.vmPathName", "runtime.powerState"]
            props = self._session._call_method(vim_util,
                        "get_object_properties",
                        None, vm_ref, "VirtualMachine", lst_properties)
            pwr_state = None
            for elem in props:
                vm_config_pathname = None
                for prop in elem.propSet:
                    if prop.name == "runtime.powerState":
                        pwr_state = prop.val
                    elif prop.name == "config.files.vmPathName":
                        vm_config_pathname = prop.val
                # Derive the datastore + .vmx path used below to locate
                # the VM's folder on disk.
                # NOTE(review): datastore_name / vmx_file_path stay
                # unbound when no vmPathName property is returned; the
                # later folder deletion would then raise and be caught by
                # the outer except -- confirm this is intended.
                if vm_config_pathname:
                    datastore_name, vmx_file_path = \
                        vm_util.split_datastore_path(vm_config_pathname)
            # Power off the VM if it is in PoweredOn state.
            if pwr_state == "poweredOn":
                LOG.debug(_("Powering off the VM %s") % instance.name)
                poweroff_task = self._session._call_method(
                       self._session._get_vim(),
                       "PowerOffVM_Task", vm_ref)
                self._session._wait_for_task(instance.id, poweroff_task)
                LOG.debug(_("Powered off the VM %s") % instance.name)

            # Un-register the VM
            try:
                LOG.debug(_("Unregistering the VM %s") % instance.name)
                self._session._call_method(self._session._get_vim(),
                                           "UnregisterVM", vm_ref)
                LOG.debug(_("Unregistered the VM %s") % instance.name)
            except Exception, excep:
                LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
                           " while un-registering the VM: %s") % str(excep))

            # Delete the folder holding the VM related content on
            # the datastore.
            try:
                dir_ds_compliant_path = vm_util.build_datastore_path(
                                 datastore_name,
                                 os.path.dirname(vmx_file_path))
                LOG.debug(_("Deleting contents of the VM %(name)s from "
                            "datastore %(datastore_name)s") %
                           ({'name': instance.name,
                             'datastore_name': datastore_name}))
                delete_task = self._session._call_method(
                    self._session._get_vim(),
                    "DeleteDatastoreFile_Task",
                    self._session._get_vim().get_service_content().fileManager,
                    name=dir_ds_compliant_path)
                self._session._wait_for_task(instance.id, delete_task)
                LOG.debug(_("Deleted contents of the VM %(name)s from "
                            "datastore %(datastore_name)s") %
                           ({'name': instance.name,
                             'datastore_name': datastore_name}))
            except Exception, excep:
                LOG.warn(_("In vmwareapi:vmops:destroy, "
                             "got this exception while deleting"
                             " the VM contents from the disk: %s")
                             % str(excep))
        except Exception, exc:
            # Broad catch by design: destroy must never raise to the
            # compute manager; failures are only logged.
            LOG.exception(exc)
+
    def pause(self, instance, callback):
        """Pause a VM instance.

        Not supported by the vmwareapi driver; always raises
        exception.APIError.
        """
        raise exception.APIError("pause not supported for vmwareapi")
+
    def unpause(self, instance, callback):
        """Un-Pause a VM instance.

        Not supported by the vmwareapi driver; always raises
        exception.APIError.
        """
        raise exception.APIError("unpause not supported for vmwareapi")
+
+ def suspend(self, instance, callback):
+ """Suspend the specified instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ # Only PoweredOn VMs can be suspended.
+ if pwr_state == "poweredOn":
+ LOG.debug(_("Suspending the VM %s ") % instance.name)
+ suspend_task = self._session._call_method(self._session._get_vim(),
+ "SuspendVM_Task", vm_ref)
+ self._wait_with_callback(instance.id, suspend_task, callback)
+ LOG.debug(_("Suspended the VM %s ") % instance.name)
+ # Raise Exception if VM is poweredOff
+ elif pwr_state == "poweredOff":
+ raise exception.Invalid(_("instance - %s is poweredOff and hence "
+ " can't be suspended.") % instance.name)
+ LOG.debug(_("VM %s was already in suspended state. So returning "
+ "without doing anything") % instance.name)
+
+ def resume(self, instance, callback):
+ """Resume the specified instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ if pwr_state.lower() == "suspended":
+ LOG.debug(_("Resuming the VM %s") % instance.name)
+ suspend_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._wait_with_callback(instance.id, suspend_task, callback)
+ LOG.debug(_("Resumed the VM %s ") % instance.name)
+ else:
+ raise exception.Invalid(_("instance - %s not in Suspended state "
+ "and hence can't be Resumed.") % instance.name)
+
+ def get_info(self, instance_name):
+ """Return data about the VM instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance_name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance_name)
+
+ lst_properties = ["summary.config.numCpu",
+ "summary.config.memorySizeMB",
+ "runtime.powerState"]
+ vm_props = self._session._call_method(vim_util,
+ "get_object_properties", None, vm_ref, "VirtualMachine",
+ lst_properties)
+ max_mem = None
+ pwr_state = None
+ num_cpu = None
+ for elem in vm_props:
+ for prop in elem.propSet:
+ if prop.name == "summary.config.numCpu":
+ num_cpu = int(prop.val)
+ elif prop.name == "summary.config.memorySizeMB":
+ # In MB, but we want in KB
+ max_mem = int(prop.val) * 1024
+ elif prop.name == "runtime.powerState":
+ pwr_state = VMWARE_POWER_STATES[prop.val]
+
+ return {'state': pwr_state,
+ 'max_mem': max_mem,
+ 'mem': max_mem,
+ 'num_cpu': num_cpu,
+ 'cpu_time': 0}
+
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics.

        Not implemented by the vmwareapi driver; always raises
        exception.APIError.
        """
        raise exception.APIError("get_diagnostics not implemented for "
                                 "vmwareapi")
+
+ def get_console_output(self, instance):
+ """Return snapshot of console."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ param_list = {"id": str(vm_ref)}
+ base_url = "%s://%s/screen?%s" % (self._session._scheme,
+ self._session._host_ip,
+ urllib.urlencode(param_list))
+ request = urllib2.Request(base_url)
+ base64string = base64.encodestring(
+ '%s:%s' % (
+ self._session._host_username,
+ self._session._host_password)).replace('\n', '')
+ request.add_header("Authorization", "Basic %s" % base64string)
+ result = urllib2.urlopen(request)
+ if result.code == 200:
+ return result.read()
+ else:
+ return ""
+
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console."""
+ return 'http://fakeajaxconsole/fake_url'
+
+ def _set_machine_id(self, client_factory, instance):
+ """
+ Set the machine id of the VM for guest tools to pick up and change
+ the IP.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ mac_addr = instance.mac_address
+ net_mask = network["netmask"]
+ gateway = network["gateway"]
+ ip_addr = db.instance_get_fixed_address(context.get_admin_context(),
+ instance['id'])
+ machine_id_chanfge_spec = \
+ vm_util.get_machine_id_change_spec(client_factory, mac_addr,
+ ip_addr, net_mask, gateway)
+ LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
+ "with ip - %(ip_addr)s") %
+ ({'name': instance.name,
+ 'ip_addr': ip_addr}))
+ reconfig_task = self._session._call_method(self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=machine_id_chanfge_spec)
+ self._session._wait_for_task(instance.id, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %(name)s to set the machine id "
+ "with ip - %(ip_addr)s") %
+ ({'name': instance.name,
+ 'ip_addr': ip_addr}))
+
+ def _get_datacenter_name_and_ref(self):
+ """Get the datacenter name and the reference."""
+ dc_obj = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["name"])
+ return dc_obj[0].obj, dc_obj[0].propSet[0].val
+
+ def _path_exists(self, ds_browser, ds_path):
+ """Check if the path exists on the datastore."""
+ search_task = self._session._call_method(self._session._get_vim(),
+ "SearchDatastore_Task",
+ ds_browser,
+ datastorePath=ds_path)
+ # Wait till the state changes from queued or running.
+ # If an error state is returned, it means that the path doesn't exist.
+ while True:
+ task_info = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ search_task, "Task", "info")
+ if task_info.state in ['queued', 'running']:
+ time.sleep(2)
+ continue
+ break
+ if task_info.state == "error":
+ return False
+ return True
+
+ def _mkdir(self, ds_path):
+ """
+ Creates a directory at the path specified. If it is just "NAME",
+ then a directory with this name is created at the topmost level of the
+ DataStore.
+ """
+ LOG.debug(_("Creating directory with path %s") % ds_path)
+ self._session._call_method(self._session._get_vim(), "MakeDirectory",
+ self._session._get_vim().get_service_content().fileManager,
+ name=ds_path, createParentDirectories=False)
+ LOG.debug(_("Created directory with path %s") % ds_path)
+
+ def _get_vm_ref_from_the_name(self, vm_name):
+ """Get reference to the VM with the name specified."""
+ vms = self._session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ for vm in vms:
+ if vm.propSet[0].val == vm_name:
+ return vm.obj
+ return None
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py new file mode 100644 index 000000000..50c6baedf --- /dev/null +++ b/nova/virt/vmwareapi/vmware_images.py @@ -0,0 +1,201 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Utility functions for Image transfer.
+"""
+
+from glance import client
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.virt.vmwareapi import io_util
+from nova.virt.vmwareapi import read_write_util
+
# Module-level logger for the image-transfer helpers.
LOG = logging.getLogger("nova.virt.vmwareapi.vmware_images")

FLAGS = flags.FLAGS

# Number of chunks buffered in the reader -> writer pipe during a transfer.
QUEUE_BUFFER_SIZE = 10
+
+
+def start_transfer(read_file_handle, data_size, write_file_handle=None,
+ glance_client=None, image_id=None, image_meta={}):
+ """Start the data transfer from the reader to the writer.
+ Reader writes to the pipe and the writer reads from the pipe. This means
+ that the total transfer time boils down to the slower of the read/write
+ and not the addition of the two times."""
+ # The pipe that acts as an intermediate store of data for reader to write
+ # to and writer to grab from.
+ thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
+ # The read thread. In case of glance it is the instance of the
+ # GlanceFileRead class. The glance client read returns an iterator
+ # and this class wraps that iterator to provide datachunks in calls
+ # to read.
+ read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
+
+ # In case of Glance - VMWare transfer, we just need a handle to the
+ # HTTP Connection that is to send transfer data to the VMWare datastore.
+ if write_file_handle:
+ write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
+ # In case of VMWare - Glance transfer, we relinquish VMWare HTTP file read
+ # handle to Glance Client instance, but to be sure of the transfer we need
+ # to be sure of the status of the image on glnace changing to active.
+ # The GlanceWriteThread handles the same for us.
+ elif glance_client and image_id:
+ write_thread = io_util.GlanceWriteThread(thread_safe_pipe,
+ glance_client, image_id, image_meta)
+ # Start the read and write threads.
+ read_event = read_thread.start()
+ write_event = write_thread.start()
+ try:
+ # Wait on the read and write events to signal their end
+ read_event.wait()
+ write_event.wait()
+ except Exception, exc:
+ # In case of any of the reads or writes raising an exception,
+ # stop the threads so that we un-necessarily don't keep the other one
+ # waiting.
+ read_thread.stop()
+ write_thread.stop()
+
+ # Log and raise the exception.
+ LOG.exception(exc)
+ raise exception.Error(exc)
+ finally:
+ # No matter what, try closing the read and write handles, if it so
+ # applies.
+ read_file_handle.close()
+ if write_file_handle:
+ write_file_handle.close()
+
+
def fetch_image(image, instance, **kwargs):
    """Fetch an image for attaching to the newly created VM."""
    # Dispatch on the configured image service.
    handlers = {
        "nova.image.glance.GlanceImageService": _get_glance_image,
        "nova.image.s3.S3ImageService": _get_s3_image,
        "nova.image.local.LocalImageService": _get_local_image,
    }
    try:
        func = handlers[FLAGS.image_service]
    except KeyError:
        raise NotImplementedError(_("The Image Service %s is not implemented")
                                  % FLAGS.image_service)
    return func(image, instance, **kwargs)
+
+
def upload_image(image, instance, **kwargs):
    """Upload the newly snapshotted VM disk file."""
    # Dispatch on the configured image service.
    handlers = {
        "nova.image.glance.GlanceImageService": _put_glance_image,
        "nova.image.s3.S3ImageService": _put_s3_image,
        "nova.image.local.LocalImageService": _put_local_image,
    }
    try:
        func = handlers[FLAGS.image_service]
    except KeyError:
        raise NotImplementedError(_("The Image Service %s is not implemented")
                                  % FLAGS.image_service)
    return func(image, instance, **kwargs)
+
+
def _get_glance_image(image, instance, **kwargs):
    """Download image from the glance image server."""
    LOG.debug(_("Downloading image %s from glance image server") % image)
    glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
    metadata, read_iter = glance_client.get_image(image)
    file_size = int(metadata['size'])
    read_file_handle = read_write_util.GlanceFileRead(read_iter)
    write_file_handle = read_write_util.VMWareHTTPWriteFile(
            kwargs.get("host"),
            kwargs.get("data_center_name"),
            kwargs.get("datastore_name"),
            kwargs.get("cookies"),
            kwargs.get("file_path"),
            file_size)
    start_transfer(read_file_handle, file_size,
                   write_file_handle=write_file_handle)
    LOG.debug(_("Downloaded image %s from glance image server") % image)
+
+
+def _get_s3_image(image, instance, **kwargs):
+ """Download image from the S3 image server."""
+ raise NotImplementedError
+
+
+def _get_local_image(image, instance, **kwargs):
+ """Download image from the local nova compute node."""
+ raise NotImplementedError
+
+
def _put_glance_image(image, instance, **kwargs):
    """Upload the snapshotted vm disk file to Glance image server."""
    LOG.debug(_("Uploading image %s to the Glance image server") % image)
    read_file_handle = read_write_util.VmWareHTTPReadFile(
            kwargs.get("host"),
            kwargs.get("data_center_name"),
            kwargs.get("datastore_name"),
            kwargs.get("cookies"),
            kwargs.get("file_path"))
    file_size = read_file_handle.get_size()
    glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
    # VMWare-specific custom properties carried on the glance image.
    image_properties = {"vmware_adaptertype": kwargs.get("adapter_type"),
                        "vmware_ostype": kwargs.get("os_type"),
                        "vmware_image_version": kwargs.get("image_version")}
    # The properties and other fields that we need to set for the image.
    image_metadata = {"is_public": True,
                      "disk_format": "vmdk",
                      "container_format": "bare",
                      "type": "vmdk",
                      "properties": image_properties}
    start_transfer(read_file_handle, file_size, glance_client=glance_client,
                   image_id=image, image_meta=image_metadata)
    LOG.debug(_("Uploaded image %s to the Glance image server") % image)
+
+
+def _put_local_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to the local nova compute node."""
+ raise NotImplementedError
+
+
+def _put_s3_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to S3 image server."""
+ raise NotImplementedError
+
+
def get_vmdk_size_and_properties(image, instance):
    """
    Get size of the vmdk file that is to be downloaded for attach in spawn.
    Need this to create the dummy virtual disk for the meta-data file. The
    geometry of the disk created depends on the size.

    :returns: (size, properties) tuple from the image service metadata
    :raises NotImplementedError: for image services without support
    """

    LOG.debug(_("Getting image size for the image %s") % image)
    if FLAGS.image_service == "nova.image.glance.GlanceImageService":
        glance_client = client.Client(FLAGS.glance_host,
                                      FLAGS.glance_port)
        meta_data = glance_client.get_image_meta(image)
        size, properties = meta_data["size"], meta_data["properties"]
    elif FLAGS.image_service == "nova.image.s3.S3ImageService":
        raise NotImplementedError
    elif FLAGS.image_service == "nova.image.local.LocalImageService":
        raise NotImplementedError
    else:
        # BUG FIX: an unrecognized image service previously fell through to
        # the LOG/return below and hit a NameError on 'size'. Fail loudly
        # and consistently with fetch_image/upload_image instead.
        raise NotImplementedError(_("The Image Service %s is not implemented")
                                  % FLAGS.image_service)
    LOG.debug(_("Got image size of %(size)s for the image %(image)s") %
              locals())
    return size, properties
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py new file mode 100644 index 000000000..87c3fa299 --- /dev/null +++ b/nova/virt/vmwareapi_conn.py @@ -0,0 +1,375 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to the VMware ESX platform.
+
+**Related Flags**
+
+:vmwareapi_host_ip: IPAddress of VMware ESX server.
+:vmwareapi_host_username: Username for connection to VMware ESX Server.
+:vmwareapi_host_password: Password for connection to VMware ESX Server.
+:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks
+ (default: 1.0).
+:vmwareapi_api_retry_count: The API retry count in case of failure such as
+ network failures (socket errors etc.)
+ (default: 10).
+
+"""
+
+import time
+
+from eventlet import event
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import vim
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi.vmops import VMWareVMOps
+
# Module-level logger for the vmwareapi connection driver.
LOG = logging.getLogger("nova.virt.vmwareapi_conn")

FLAGS = flags.FLAGS
# NOTE(review): in several help strings below, adjacent string literals
# concatenate without a separating space (e.g. "host.''Required") — the
# rendered help text runs words together; left as-is here.
flags.DEFINE_string('vmwareapi_host_ip',
                    None,
                    'URL for connection to VMWare ESX host.'
                    'Required if connection_type is vmwareapi.')
flags.DEFINE_string('vmwareapi_host_username',
                    None,
                    'Username for connection to VMWare ESX host.'
                    'Used only if connection_type is vmwareapi.')
flags.DEFINE_string('vmwareapi_host_password',
                    None,
                    'Password for connection to VMWare ESX host.'
                    'Used only if connection_type is vmwareapi.')
flags.DEFINE_float('vmwareapi_task_poll_interval',
                   5.0,
                   'The interval used for polling of remote tasks '
                   'Used only if connection_type is vmwareapi')
# NOTE(review): a retry *count* declared as a float — presumably should be
# DEFINE_integer; the comparison in _call_method works either way.
flags.DEFINE_float('vmwareapi_api_retry_count',
                   10,
                   'The number of times we retry on failures, '
                   'e.g., socket error, etc.'
                   'Used only if connection_type is vmwareapi')
flags.DEFINE_string('vmwareapi_vlan_interface',
                    'vmnic0',
                    'Physical ethernet adapter name for vlan networking')

# Seconds to sleep between retries of a failed VIM API call.
TIME_BETWEEN_API_CALL_RETRIES = 2.0
+
+
class Failure(Exception):
    """Base Exception class for handling task failures."""

    def __init__(self, details):
        # Keep the raw failure details for later formatting.
        self.details = details

    def __str__(self):
        return str(self.details)
+
+
def get_connection(_):
    """Sets up the ESX host connection.

    The single (ignored) argument is the read_only flag passed by the
    virt-driver loader.

    :raises Exception: if any of the required vmwareapi_* flags is unset
    """
    host_ip = FLAGS.vmwareapi_host_ip
    host_username = FLAGS.vmwareapi_host_username
    host_password = FLAGS.vmwareapi_host_password
    api_retry_count = FLAGS.vmwareapi_api_retry_count
    if not host_ip or host_username is None or host_password is None:
        # BUG FIX: the parameter named '_' shadows the gettext builtin, so
        # the original _("Must specify...") call invoked the ignored
        # argument and raised TypeError instead of this error. Also added
        # the missing spaces between the concatenated message fragments.
        raise Exception("Must specify vmwareapi_host_ip, "
                        "vmwareapi_host_username "
                        "and vmwareapi_host_password to use "
                        "connection_type=vmwareapi")
    return VMWareESXConnection(host_ip, host_username, host_password,
                               api_retry_count)
+
+
class VMWareESXConnection(object):
    """The ESX host connection object.

    Thin driver facade: every virt-driver entry point delegates to a
    VMWareVMOps instance bound to a single VMWareAPISession.
    """

    def __init__(self, host_ip, host_username, host_password,
                 api_retry_count, scheme="https"):
        session = VMWareAPISession(host_ip, host_username, host_password,
                                   api_retry_count, scheme=scheme)
        self._vmops = VMWareVMOps(session)

    def init_host(self, host):
        """Do the initialization that needs to be done."""
        # FIXME(sateesh): implement this
        pass

    def list_instances(self):
        """List VM instances."""
        return self._vmops.list_instances()

    def spawn(self, instance):
        """Create VM instance."""
        self._vmops.spawn(instance)

    def snapshot(self, instance, name):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(instance, name)

    def reboot(self, instance):
        """Reboot VM instance."""
        self._vmops.reboot(instance)

    def destroy(self, instance):
        """Destroy VM instance."""
        self._vmops.destroy(instance)

    def pause(self, instance, callback):
        """Pause VM instance."""
        self._vmops.pause(instance, callback)

    def unpause(self, instance, callback):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance, callback)

    def suspend(self, instance, callback):
        """Suspend the specified instance."""
        self._vmops.suspend(instance, callback)

    def resume(self, instance, callback):
        """Resume the suspended VM instance."""
        self._vmops.resume(instance, callback)

    def get_info(self, instance_id):
        """Return info about the VM instance."""
        return self._vmops.get_info(instance_id)

    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        # BUG FIX: this previously delegated to self._vmops.get_info(),
        # returning instance info instead of diagnostics.
        return self._vmops.get_diagnostics(instance)

    def get_console_output(self, instance):
        """Return snapshot of console."""
        return self._vmops.get_console_output(instance)

    def get_ajax_console(self, instance):
        """Return link to instance's ajax console."""
        return self._vmops.get_ajax_console(instance)

    def attach_volume(self, instance_name, device_path, mountpoint):
        """Attach volume storage to VM instance."""
        pass

    def detach_volume(self, instance_name, mountpoint):
        """Detach volume storage to VM instance."""
        pass

    def get_console_pool_info(self, console_type):
        """Get info about the host on which the VM resides."""
        return {'address': FLAGS.vmwareapi_host_ip,
                'username': FLAGS.vmwareapi_host_username,
                'password': FLAGS.vmwareapi_host_password}

    def update_available_resource(self, ctxt, host):
        """This method is supported only by libvirt."""
        return
+
+
class VMWareAPISession(object):
    """
    Sets up a session with the ESX host and handles all
    the calls made to the host.

    A single session id is kept and transparently re-created when the host
    reports a NotAuthenticated fault; API calls go through _call_method,
    which retries up to api_retry_count times on session/overload faults.
    """

    def __init__(self, host_ip, host_username, host_password,
                 api_retry_count, scheme="https"):
        self._host_ip = host_ip
        self._host_username = host_username
        self._host_password = host_password
        self.api_retry_count = api_retry_count
        self._scheme = scheme
        self._session_id = None
        self.vim = None
        # Establish the session eagerly; raises exception.Error on failure.
        self._create_session()

    def _get_vim_object(self):
        """Create the VIM Object instance."""
        return vim.Vim(protocol=self._scheme, host=self._host_ip)

    def _create_session(self):
        """Creates a session with the ESX host."""
        while True:
            try:
                # Login and setup the session with the ESX host for making
                # API calls
                self.vim = self._get_vim_object()
                session = self.vim.Login(
                               self.vim.get_service_content().sessionManager,
                               userName=self._host_username,
                               password=self._host_password)
                # Terminate the earlier session, if possible ( For the sake of
                # preserving sessions as there is a limit to the number of
                # sessions we can have )
                if self._session_id:
                    try:
                        self.vim.TerminateSession(
                                self.vim.get_service_content().sessionManager,
                                sessionId=[self._session_id])
                    except Exception, excep:
                        # This exception is something we can live with. It is
                        # just an extra caution on our side. The session may
                        # have been cleared. We could have made a call to
                        # SessionIsActive, but that is an overhead because we
                        # anyway would have to call TerminateSession.
                        LOG.debug(excep)
                self._session_id = session.key
                return
            except Exception, excep:
                LOG.critical(_("In vmwareapi:_create_session, "
                              "got this exception: %s") % excep)
                raise exception.Error(excep)

    def __del__(self):
        """Logs-out the session."""
        # Logout to avoid un-necessary increase in session count at the
        # ESX host
        try:
            self.vim.Logout(self.vim.get_service_content().sessionManager)
        except Exception, excep:
            # It is just cautionary on our part to do a logout in del just
            # to ensure that the session is not left active.
            LOG.debug(excep)

    def _is_vim_object(self, module):
        """Check if the module is a VIM Object instance."""
        return isinstance(module, vim.Vim)

    def _call_method(self, module, method, *args, **kwargs):
        """
        Calls a method within the module specified with
        args provided.

        For non-VIM modules (e.g. vim_util) the current vim object is
        prepended to args; on each retry it is swapped for the freshest
        vim object. Retries happen for NotAuthenticated faults (after
        re-creating the session) and for session-overload errors, up to
        api_retry_count attempts.
        """
        args = list(args)
        retry_count = 0
        exc = None
        last_fault_list = []
        while True:
            try:
                if not self._is_vim_object(module):
                    # If it is not the first try, then get the latest
                    # vim object
                    if retry_count > 0:
                        # Drop the stale vim object prepended on the
                        # previous attempt before adding the fresh one.
                        args = args[1:]
                    args = [self.vim] + args
                retry_count += 1
                temp_module = module

                # Resolve dotted method paths, e.g. "a.b" -> module.a.b.
                for method_elem in method.split("."):
                    temp_module = getattr(temp_module, method_elem)

                return temp_module(*args, **kwargs)
            except error_util.VimFaultException, excep:
                # If it is a Session Fault Exception, it may point
                # to a session gone bad. So we try re-creating a session
                # and then proceeding ahead with the call.
                exc = excep
                if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
                    # Because of the idle session returning an empty
                    # RetrievePropertiesResponse and also the same is returned
                    # when there is say empty answer to the query for
                    # VMs on the host ( as in no VMs on the host), we have no
                    # way to differentiate.
                    # So if the previous response was also an empty response
                    # and after creating a new session, we get the same empty
                    # response, then we are sure of the response being supposed
                    # to be empty.
                    if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
                        return []
                    last_fault_list = excep.fault_list
                    self._create_session()
                else:
                    # No re-trying for errors for API call has gone through
                    # and is the caller's fault. Caller should handle these
                    # errors. e.g, InvalidArgument fault.
                    break
            except error_util.SessionOverLoadException, excep:
                # For exceptions which may come because of session overload,
                # we retry
                exc = excep
            except Exception, excep:
                # If it is a proper exception, say not having furnished
                # proper data in the SOAP call or the retry limit having
                # exceeded, we raise the exception
                exc = excep
                break
            # If retry count has been reached then break and
            # raise the exception
            if retry_count > self.api_retry_count:
                break
            time.sleep(TIME_BETWEEN_API_CALL_RETRIES)

        LOG.critical(_("In vmwareapi:_call_method, "
                     "got this exception: %s") % exc)
        # Bare raise re-raises the exception caught above.
        raise

    def _get_vim(self):
        """Gets the VIM object reference, creating the session if needed."""
        if self.vim is None:
            self._create_session()
        return self.vim

    def _wait_for_task(self, instance_id, task_ref):
        """
        Return a Deferred that will give the result of the given task.
        The task is polled until it completes.
        """
        done = event.Event()
        loop = utils.LoopingCall(self._poll_task, instance_id, task_ref,
                                 done)
        loop.start(FLAGS.vmwareapi_task_poll_interval, now=True)
        # Blocks until _poll_task sends a result or an exception.
        ret_val = done.wait()
        loop.stop()
        return ret_val

    def _poll_task(self, instance_id, task_ref, done):
        """
        Poll the given task, and fires the given Deferred if we
        get a result.

        Also records an instance action row (with any error) once the task
        leaves the queued/running states.
        """
        try:
            task_info = self._call_method(vim_util, "get_dynamic_property",
                            task_ref, "Task", "info")
            task_name = task_info.name
            action = dict(
                instance_id=int(instance_id),
                action=task_name[0:255],
                error=None)
            if task_info.state in ['queued', 'running']:
                # Still in progress; poll again on the next loop tick.
                return
            elif task_info.state == 'success':
                LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
                            "status: success") % locals())
                done.send("success")
            else:
                error_info = str(task_info.error.localizedMessage)
                action["error"] = error_info
                LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
                          "status: error %(error_info)s") % locals())
                done.send_exception(exception.Error(error_info))
            db.instance_action_create(context.get_admin_context(), action)
        except Exception, excep:
            LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
            done.send_exception(excep)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 28ce215d8..c30e4b2d1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -991,8 +991,8 @@ def _write_partition(virtual_size, dev): def execute(*cmd, **kwargs): return utils.execute(*cmd, **kwargs) - execute('parted', '--script', dest, 'mklabel', 'msdos') - execute('parted', '--script', dest, 'mkpart', 'primary', + execute('sudo', 'parted', '--script', dest, 'mklabel', 'msdos') + execute('sudo', 'parted', '--script', dest, 'mkpart', 'primary', '%ds' % primary_first, '%ds' % primary_last) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index af39a3def..419b9ad90 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -117,6 +117,10 @@ class VMOps(object): vm_ref = self._create_vm(instance, vdi_uuid, network_info) self._spawn(instance, vm_ref) + def spawn_rescue(self, instance): + """Spawn a rescue instance""" + self.spawn(instance) + def _create_vm(self, instance, vdi_uuid, network_info=None): """Create VM instance""" instance_name = instance.name @@ -543,7 +547,7 @@ class VMOps(object): vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref) for vbd_ref in vbd_refs: vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref) - if vbd_rec["userdevice"] == "1": # primary VBD is always 1 + if vbd_rec.get("userdevice", None) == "1": # VBD is always 1 VMHelper.unplug_vbd(self._session, vbd_ref) VMHelper.destroy_vbd(self._session, vbd_ref) @@ -680,18 +684,18 @@ class VMOps(object): """ rescue_vm_ref = VMHelper.lookup(self._session, - instance.name + "-rescue") + "%s-rescue" % instance.name) if rescue_vm_ref: raise RuntimeError(_( "Instance is already in Rescue Mode: %s" % instance.name)) - vm_ref = self._get_vm_opaque_ref(instance) + vm_ref = VMHelper.lookup(self._session, instance.name) self._shutdown(instance, vm_ref) self._acquire_bootlock(vm_ref) instance._rescue = True - self.spawn(instance) - rescue_vm_ref = 
self._get_vm_opaque_ref(instance) + self.spawn_rescue(instance) + rescue_vm_ref = VMHelper.lookup(self._session, instance.name) vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] @@ -708,13 +712,13 @@ class VMOps(object): """ rescue_vm_ref = VMHelper.lookup(self._session, - instance.name + "-rescue") + "%s-rescue" % instance.name) if not rescue_vm_ref: raise exception.NotFound(_( "Instance is not in Rescue Mode: %s" % instance.name)) - original_vm_ref = self._get_vm_opaque_ref(instance) + original_vm_ref = VMHelper.lookup(self._session, instance.name) instance._rescue = False self._destroy_rescue_instance(rescue_vm_ref) @@ -727,24 +731,24 @@ class VMOps(object): in rescue mode for >= the provided timeout """ last_ran = self.poll_rescue_last_ran - if last_ran: - if not utils.is_older_than(last_ran, timeout): - # Do not run. Let's bail. - return - else: - # Update the time tracker and proceed. - self.poll_rescue_last_ran = utils.utcnow() - else: + if not last_ran: # We need a base time to start tracking. self.poll_rescue_last_ran = utils.utcnow() return + if not utils.is_older_than(last_ran, timeout): + # Do not run. Let's bail. + return + + # Update the time tracker and proceed. 
+ self.poll_rescue_last_ran = utils.utcnow() + rescue_vms = [] for instance in self.list_instances(): if instance.endswith("-rescue"): rescue_vms.append(dict(name=instance, - vm_ref=VMHelper.lookup(self._session, - instance))) + vm_ref=VMHelper.lookup(self._session, + instance))) for vm in rescue_vms: rescue_name = vm["name"] diff --git a/nova/volume/api.py b/nova/volume/api.py index 2f4494845..4b4bb9dc5 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -82,7 +82,8 @@ class API(base.Base): self.db.volume_update(context, volume_id, fields) def get(self, context, volume_id): - return self.db.volume_get(context, volume_id) + rv = self.db.volume_get(context, volume_id) + return dict(rv.iteritems()) def get_all(self, context): if context.is_admin: diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 779b46755..28d08201b 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -422,18 +422,17 @@ class ISCSIDriver(VolumeDriver): return properties def _run_iscsiadm(self, iscsi_properties, iscsi_command): - command = ("sudo iscsiadm -m node -T %s -p %s %s" % - (iscsi_properties['target_iqn'], - iscsi_properties['target_portal'], - iscsi_command)) - (out, err) = self._execute(command) + (out, err) = self._execute('sudo', 'iscsiadm', '-m', 'node', '-T', + iscsi_properties['target_iqn'], + '-p', iscsi_properties['target_portal'], + iscsi_command) LOG.debug("iscsiadm %s: stdout=%s stderr=%s" % (iscsi_command, out, err)) return (out, err) def _iscsiadm_update(self, iscsi_properties, property_key, property_value): - iscsi_command = ("--op update -n %s -v %s" % - (property_key, property_value)) + iscsi_command = ('--op', 'update', '-n', property_key, + '-v', property_value) return self._run_iscsiadm(iscsi_properties, iscsi_command) def discover_volume(self, context, volume): @@ -441,7 +440,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) if not iscsi_properties['target_discovered']: - 
self._run_iscsiadm(iscsi_properties, "--op new") + self._run_iscsiadm(iscsi_properties, ('--op', 'new')) if iscsi_properties.get('auth_method'): self._iscsiadm_update(iscsi_properties, @@ -493,7 +492,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") self._run_iscsiadm(iscsi_properties, "--logout") - self._run_iscsiadm(iscsi_properties, "--op delete") + self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) def check_for_export(self, context, volume_id): """Make sure volume is exported.""" diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 9dea35b35..2178389ce 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -64,14 +64,15 @@ flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') -class VolumeManager(manager.Manager): +class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices.""" def __init__(self, volume_driver=None, *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" if not volume_driver: volume_driver = FLAGS.volume_driver self.driver = utils.import_object(volume_driver) - super(VolumeManager, self).__init__(*args, **kwargs) + super(VolumeManager, self).__init__(service_name='volume', + *args, **kwargs) # NOTE(vish): Implementation specific db handling is done # by the driver. self.driver.db = self.db |
