diff options
| author | Naveed Massjouni <naveedm9@gmail.com> | 2011-09-02 01:39:31 -0400 |
|---|---|---|
| committer | Naveed Massjouni <naveedm9@gmail.com> | 2011-09-02 01:39:31 -0400 |
| commit | c5ad2b155a2ad7d7aefea316362cc354d0cf4cf3 (patch) | |
| tree | b54c26c6445ddf9d051e9afaa59f5aa2774f2fc8 | |
| parent | 4f72f6c0fb88baaa680e5dd7973a2b1aa9bd6aaf (diff) | |
| parent | d80dc5bbbd1781bd33d9f69b608014e9cc2e41a3 (diff) | |
| download | nova-c5ad2b155a2ad7d7aefea316362cc354d0cf4cf3.tar.gz nova-c5ad2b155a2ad7d7aefea316362cc354d0cf4cf3.tar.xz nova-c5ad2b155a2ad7d7aefea316362cc354d0cf4cf3.zip | |
Merge from trunk
92 files changed, 7744 insertions, 1026 deletions
diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit index a06c6b1b3..7ce5732e7 100755 --- a/bin/instance-usage-audit +++ b/bin/instance-usage-audit @@ -102,9 +102,8 @@ if __name__ == '__main__': logging.setup() begin, end = time_period(FLAGS.instance_usage_audit_period) print "Creating usages for %s until %s" % (str(begin), str(end)) - instances = db.instance_get_active_by_window(context.get_admin_context(), - begin, - end) + ctxt = context.get_admin_context() + instances = db.instance_get_active_by_window_joined(ctxt, begin, end) print "%s instances" % len(instances) for instance_ref in instances: usage_info = utils.usage_from_instance(instance_ref, diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 0a789b4b9..23fb42fb5 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -113,11 +113,10 @@ class AjaxConsoleProxy(object): AjaxConsoleProxy.tokens[kwargs['token']] = \ {'args': kwargs, 'last_activity': time.time()} - conn = rpc.create_connection(new=True) - consumer = rpc.create_consumer( - conn, - FLAGS.ajax_console_proxy_topic, - TopicProxy) + self.conn = rpc.create_connection(new=True) + self.conn.create_consumer( + FLAGS.ajax_console_proxy_topic, + TopicProxy) def delete_expired_tokens(): now = time.time() @@ -129,7 +128,7 @@ class AjaxConsoleProxy(object): for k in to_delete: del AjaxConsoleProxy.tokens[k] - utils.LoopingCall(consumer.fetch, enable_callbacks=True).start(0.1) + self.conn.consume_in_thread() utils.LoopingCall(delete_expired_tokens).start(1) if __name__ == '__main__': @@ -142,3 +141,4 @@ if __name__ == '__main__': server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port) service.serve(server) service.wait() + self.conn.close() diff --git a/bin/nova-manage b/bin/nova-manage index 890cde0b8..c3b2c71ce 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -53,6 +53,7 @@ CLI interface for nova management. 
""" +import ast import gettext import glob import json @@ -85,11 +86,13 @@ from nova import quota from nova import rpc from nova import utils from nova import version +from nova import vsa from nova.api.ec2 import ec2utils from nova.auth import manager from nova.cloudpipe import pipelib from nova.compute import instance_types from nova.db import migration +from nova.volume import volume_types FLAGS = flags.FLAGS flags.DECLARE('fixed_range', 'nova.network.manager') @@ -163,7 +166,7 @@ class VpnCommands(object): print address, print vpn['host'], print ec2utils.id_to_ec2_id(vpn['id']), - print vpn['state_description'], + print vpn['vm_state'], print state else: print None @@ -866,7 +869,7 @@ class VmCommands(object): instance['hostname'], instance['host'], instance['instance_type'].name, - instance['state_description'], + instance['vm_state'], instance['launched_at'], instance['image_ref'], instance['kernel_id'], @@ -1097,6 +1100,477 @@ class VersionCommands(object): self.list() +class VsaCommands(object): + """Methods for dealing with VSAs""" + + def __init__(self, *args, **kwargs): + self.manager = manager.AuthManager() + self.vsa_api = vsa.API() + self.context = context.get_admin_context() + + self._format_str_vsa = "%(id)-5s %(vsa_id)-15s %(name)-25s "\ + "%(type)-10s %(vcs)-6s %(drives)-9s %(stat)-10s "\ + "%(az)-10s %(time)-10s" + self._format_str_volume = "\t%(id)-4s %(name)-15s %(size)-5s "\ + "%(stat)-10s %(att)-20s %(time)s" + self._format_str_drive = "\t%(id)-4s %(name)-15s %(size)-5s "\ + "%(stat)-10s %(host)-20s %(type)-4s %(tname)-10s %(time)s" + self._format_str_instance = "\t%(id)-4s %(name)-10s %(dname)-20s "\ + "%(image)-12s %(type)-10s %(fl_ip)-15s %(fx_ip)-15s "\ + "%(stat)-10s %(host)-15s %(time)s" + + def _print_vsa_header(self): + print self._format_str_vsa %\ + dict(id=_('ID'), + vsa_id=_('vsa_id'), + name=_('displayName'), + type=_('vc_type'), + vcs=_('vc_cnt'), + drives=_('drive_cnt'), + stat=_('status'), + az=_('AZ'), + time=_('createTime')) 
+ + def _print_vsa(self, vsa): + print self._format_str_vsa %\ + dict(id=vsa['id'], + vsa_id=vsa['name'], + name=vsa['display_name'], + type=vsa['vsa_instance_type'].get('name', None), + vcs=vsa['vc_count'], + drives=vsa['vol_count'], + stat=vsa['status'], + az=vsa['availability_zone'], + time=str(vsa['created_at'])) + + def _print_volume_header(self): + print _(' === Volumes ===') + print self._format_str_volume %\ + dict(id=_('ID'), + name=_('name'), + size=_('size'), + stat=_('status'), + att=_('attachment'), + time=_('createTime')) + + def _print_volume(self, vol): + print self._format_str_volume %\ + dict(id=vol['id'], + name=vol['display_name'] or vol['name'], + size=vol['size'], + stat=vol['status'], + att=vol['attach_status'], + time=str(vol['created_at'])) + + def _print_drive_header(self): + print _(' === Drives ===') + print self._format_str_drive %\ + dict(id=_('ID'), + name=_('name'), + size=_('size'), + stat=_('status'), + host=_('host'), + type=_('type'), + tname=_('typeName'), + time=_('createTime')) + + def _print_drive(self, drive): + if drive['volume_type_id'] is not None and drive.get('volume_type'): + drive_type_name = drive['volume_type'].get('name') + else: + drive_type_name = '' + + print self._format_str_drive %\ + dict(id=drive['id'], + name=drive['display_name'], + size=drive['size'], + stat=drive['status'], + host=drive['host'], + type=drive['volume_type_id'], + tname=drive_type_name, + time=str(drive['created_at'])) + + def _print_instance_header(self): + print _(' === Instances ===') + print self._format_str_instance %\ + dict(id=_('ID'), + name=_('name'), + dname=_('disp_name'), + image=_('image'), + type=_('type'), + fl_ip=_('floating_IP'), + fx_ip=_('fixed_IP'), + stat=_('status'), + host=_('host'), + time=_('createTime')) + + def _print_instance(self, vc): + + fixed_addr = None + floating_addr = None + if vc['fixed_ips']: + fixed = vc['fixed_ips'][0] + fixed_addr = fixed['address'] + if fixed['floating_ips']: + floating_addr = 
fixed['floating_ips'][0]['address'] + floating_addr = floating_addr or fixed_addr + + print self._format_str_instance %\ + dict(id=vc['id'], + name=ec2utils.id_to_ec2_id(vc['id']), + dname=vc['display_name'], + image=('ami-%08x' % int(vc['image_ref'])), + type=vc['instance_type']['name'], + fl_ip=floating_addr, + fx_ip=fixed_addr, + stat=vc['vm_state'], + host=vc['host'], + time=str(vc['created_at'])) + + def _list(self, context, vsas, print_drives=False, + print_volumes=False, print_instances=False): + if vsas: + self._print_vsa_header() + + for vsa in vsas: + self._print_vsa(vsa) + vsa_id = vsa.get('id') + + if print_instances: + instances = self.vsa_api.get_all_vsa_instances(context, vsa_id) + if instances: + print + self._print_instance_header() + for instance in instances: + self._print_instance(instance) + print + + if print_drives: + drives = self.vsa_api.get_all_vsa_drives(context, vsa_id) + if drives: + self._print_drive_header() + for drive in drives: + self._print_drive(drive) + print + + if print_volumes: + volumes = self.vsa_api.get_all_vsa_volumes(context, vsa_id) + if volumes: + self._print_volume_header() + for volume in volumes: + self._print_volume(volume) + print + + @args('--storage', dest='storage', + metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]", + help='Initial storage allocation for VSA') + @args('--name', dest='name', metavar="<name>", help='VSA name') + @args('--description', dest='description', metavar="<description>", + help='VSA description') + @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs') + @args('--instance_type', dest='instance_type_name', metavar="<name>", + help='Instance type name') + @args('--image', dest='image_name', metavar="<name>", help='Image name') + @args('--shared', dest='shared', action="store_true", default=False, + help='Use shared drives') + @args('--az', dest='az', metavar="<zone:host>", help='Availability zone') + @args('--user', dest="user_id", metavar='<User name>', 
+ help='User name') + @args('--project', dest="project_id", metavar='<Project name>', + help='Project name') + def create(self, storage='[]', name=None, description=None, vc_count=1, + instance_type_name=None, image_name=None, shared=None, + az=None, user_id=None, project_id=None): + """Create a VSA.""" + + if project_id is None: + try: + project_id = os.getenv("EC2_ACCESS_KEY").split(':')[1] + except Exception as exc: + print _("Failed to retrieve project id: %(exc)s") % exc + raise + + if user_id is None: + try: + project = self.manager.get_project(project_id) + user_id = project.project_manager_id + except Exception as exc: + print _("Failed to retrieve user info: %(exc)s") % exc + raise + + is_admin = self.manager.is_admin(user_id) + ctxt = context.RequestContext(user_id, project_id, is_admin) + if not is_admin and \ + not self.manager.is_project_member(user_id, project_id): + msg = _("%(user_id)s must be an admin or a " + "member of %(project_id)s") + LOG.warn(msg % locals()) + raise ValueError(msg % locals()) + + # Sanity check for storage string + storage_list = [] + if storage is not None: + try: + storage_list = ast.literal_eval(storage) + except: + print _("Invalid string format %s") % storage + raise + + for node in storage_list: + if ('drive_name' not in node) or ('num_drives' not in node): + print (_("Invalid string format for element %s. 
" \ + "Expecting keys 'drive_name' & 'num_drives'"), + str(node)) + raise KeyError + + if instance_type_name == '': + instance_type_name = None + instance_type = instance_types.get_instance_type_by_name( + instance_type_name) + + if image_name == '': + image_name = None + + if shared in [None, False, "--full_drives"]: + shared = False + elif shared in [True, "--shared"]: + shared = True + else: + raise ValueError(_('Shared parameter should be set either to "\ + "--shared or --full_drives')) + + values = { + 'display_name': name, + 'display_description': description, + 'vc_count': int(vc_count), + 'instance_type': instance_type, + 'image_name': image_name, + 'availability_zone': az, + 'storage': storage_list, + 'shared': shared, + } + + result = self.vsa_api.create(ctxt, **values) + self._list(ctxt, [result]) + + @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID') + @args('--name', dest='name', metavar="<name>", help='VSA name') + @args('--description', dest='description', metavar="<description>", + help='VSA description') + @args('--vc', dest='vc_count', metavar="<number>", help='Number of VCs') + def update(self, vsa_id, name=None, description=None, vc_count=None): + """Updates name/description of vsa and number of VCs.""" + + values = {} + if name is not None: + values['display_name'] = name + if description is not None: + values['display_description'] = description + if vc_count is not None: + values['vc_count'] = int(vc_count) + + vsa_id = ec2utils.ec2_id_to_id(vsa_id) + result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values) + self._list(self.context, [result]) + + @args('--id', dest='vsa_id', metavar="<vsa_id>", help='VSA ID') + def delete(self, vsa_id): + """Delete a VSA.""" + vsa_id = ec2utils.ec2_id_to_id(vsa_id) + self.vsa_api.delete(self.context, vsa_id) + + @args('--id', dest='vsa_id', metavar="<vsa_id>", + help='VSA ID (optional)') + @args('--all', dest='all', action="store_true", default=False, + help='Show all available 
details') + @args('--drives', dest='drives', action="store_true", + help='Include drive-level details') + @args('--volumes', dest='volumes', action="store_true", + help='Include volume-level details') + @args('--instances', dest='instances', action="store_true", + help='Include instance-level details') + def list(self, vsa_id=None, all=False, + drives=False, volumes=False, instances=False): + """Describe all available VSAs (or particular one).""" + + vsas = [] + if vsa_id is not None: + internal_id = ec2utils.ec2_id_to_id(vsa_id) + vsa = self.vsa_api.get(self.context, internal_id) + vsas.append(vsa) + else: + vsas = self.vsa_api.get_all(self.context) + + if all: + drives = volumes = instances = True + + self._list(self.context, vsas, drives, volumes, instances) + + def update_capabilities(self): + """Forces updates capabilities on all nova-volume nodes.""" + + rpc.fanout_cast(context.get_admin_context(), + FLAGS.volume_topic, + {"method": "notification", + "args": {"event": "startup"}}) + + +class VsaDriveTypeCommands(object): + """Methods for dealing with VSA drive types""" + + def __init__(self, *args, **kwargs): + super(VsaDriveTypeCommands, self).__init__(*args, **kwargs) + self.context = context.get_admin_context() + self._drive_type_template = '%s_%sGB_%sRPM' + + def _list(self, drives): + format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s" + if len(drives): + print format_str %\ + (_('ID'), + _('name'), + _('type'), + _('size_gb'), + _('rpm'), + _('capabilities'), + _('visible'), + _('createTime')) + + for name, vol_type in drives.iteritems(): + drive = vol_type.get('extra_specs') + print format_str %\ + (str(vol_type['id']), + drive['drive_name'], + drive['drive_type'], + drive['drive_size'], + drive['drive_rpm'], + drive.get('capabilities', ''), + str(drive.get('visible', '')), + str(vol_type['created_at'])) + + @args('--type', dest='type', metavar="<type>", + help='Drive type (SATA, SAS, SSD, etc.)') + @args('--size', dest='size_gb', 
metavar="<gb>", help='Drive size in GB') + @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM') + @args('--capabilities', dest='capabilities', default=None, + metavar="<string>", help='Different capabilities') + @args('--hide', dest='hide', action="store_true", default=False, + help='Show or hide drive') + @args('--name', dest='name', metavar="<name>", help='Drive name') + def create(self, type, size_gb, rpm, capabilities=None, + hide=False, name=None): + """Create drive type.""" + + hide = True if hide in [True, "True", "--hide", "hide"] else False + + if name is None: + name = self._drive_type_template % (type, size_gb, rpm) + + extra_specs = {'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': type, + 'drive_size': size_gb, + 'drive_rpm': rpm, + 'visible': True, + } + if hide: + extra_specs['visible'] = False + + if capabilities is not None and capabilities != '': + extra_specs['capabilities'] = capabilities + + volume_types.create(self.context, name, extra_specs) + result = volume_types.get_volume_type_by_name(self.context, name) + self._list({name: result}) + + @args('--name', dest='name', metavar="<name>", help='Drive name') + @args('--purge', action="store_true", dest='purge', default=False, + help='purge record from database') + def delete(self, name, purge): + """Marks instance types / flavors as deleted""" + try: + if purge: + volume_types.purge(self.context, name) + verb = "purged" + else: + volume_types.destroy(self.context, name) + verb = "deleted" + except exception.ApiError: + print "Valid volume type name is required" + sys.exit(1) + except exception.DBError, e: + print "DB Error: %s" % e + sys.exit(2) + except: + sys.exit(3) + else: + print "%s %s" % (name, verb) + + @args('--all', dest='all', action="store_true", default=False, + help='Show all drives (including invisible)') + @args('--name', dest='name', metavar="<name>", + help='Show only specified drive') + def list(self, all=False, name=None): + """Describe all available VSA drive 
types (or particular one).""" + + all = False if all in ["--all", False, "False"] else True + + search_opts = {'extra_specs': {'type': 'vsa_drive'}} + if name is not None: + search_opts['extra_specs']['name'] = name + + if all == False: + search_opts['extra_specs']['visible'] = '1' + + drives = volume_types.get_all_types(self.context, + search_opts=search_opts) + self._list(drives) + + @args('--name', dest='name', metavar="<name>", help='Drive name') + @args('--type', dest='type', metavar="<type>", + help='Drive type (SATA, SAS, SSD, etc.)') + @args('--size', dest='size_gb', metavar="<gb>", help='Drive size in GB') + @args('--rpm', dest='rpm', metavar="<rpm>", help='RPM') + @args('--capabilities', dest='capabilities', default=None, + metavar="<string>", help='Different capabilities') + @args('--visible', dest='visible', + metavar="<show|hide>", help='Show or hide drive') + def update(self, name, type=None, size_gb=None, rpm=None, + capabilities=None, visible=None): + """Update drive type.""" + + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + extra_specs = {'type': 'vsa_drive'} + + if type: + extra_specs['drive_type'] = type + + if size_gb: + extra_specs['drive_size'] = size_gb + + if rpm: + extra_specs['drive_rpm'] = rpm + + if capabilities: + extra_specs['capabilities'] = capabilities + + if visible is not None: + if visible in ["show", True, "True"]: + extra_specs['visible'] = True + elif visible in ["hide", False, "False"]: + extra_specs['visible'] = False + else: + raise ValueError(_('visible parameter should be set to '\ + 'show or hide')) + + db.api.volume_type_extra_specs_update_or_create(self.context, + volume_type['id'], + extra_specs) + result = volume_types.get_volume_type_by_name(self.context, name) + self._list({name: result}) + + class VolumeCommands(object): """Methods for dealing with a cloud in an odd state""" @@ -1483,6 +1957,7 @@ CATEGORIES = [ ('agent', AgentBuildCommands), ('config', ConfigCommands), ('db', 
DbCommands), + ('drive', VsaDriveTypeCommands), ('fixed', FixedIpCommands), ('flavor', InstanceTypeCommands), ('floating', FloatingIpCommands), @@ -1498,7 +1973,8 @@ CATEGORIES = [ ('version', VersionCommands), ('vm', VmCommands), ('volume', VolumeCommands), - ('vpn', VpnCommands)] + ('vpn', VpnCommands), + ('vsa', VsaCommands)] def lazy_match(name, key_value_tuples): diff --git a/bin/nova-vsa b/bin/nova-vsa new file mode 100755 index 000000000..2d6eee2c0 --- /dev/null +++ b/bin/nova-vsa @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Nova VSA.""" + +import eventlet +eventlet.monkey_patch() + +import os +import sys + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + + +from nova import flags +from nova import log as logging +from nova import service +from nova import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + utils.monkey_patch() + server = service.Service.create(binary='nova-vsa') + service.serve(server) + service.wait() diff --git a/contrib/nova.sh b/contrib/nova.sh index 7994e5133..16cddebd5 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -81,7 +81,7 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet sudo apt-get install -y python-novaclient python-glance python-cheetah sudo apt-get install -y python-carrot python-tempita python-sqlalchemy - sudo apt-get install -y python-suds + sudo apt-get install -y python-suds python-kombu if [ "$USE_IPV6" == 1 ]; then diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5430f443d..3b217e62e 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,7 +20,10 @@ Starting point for routing EC2 requests. """ -import httplib2 +from urlparse import urlparse + +import eventlet +from eventlet.green import httplib import webob import webob.dec import webob.exc @@ -35,7 +38,6 @@ from nova.api.ec2 import apirequest from nova.api.ec2 import ec2utils from nova.auth import manager - FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api") flags.DEFINE_integer('lockout_attempts', 5, @@ -158,7 +160,6 @@ class ToToken(wsgi.Middleware): auth_params.pop('Signature') # Authenticate the request. 
- client = httplib2.Http() creds = {'ec2Credentials': {'access': access, 'signature': signature, 'host': req.host, @@ -166,18 +167,24 @@ class ToToken(wsgi.Middleware): 'path': req.path, 'params': auth_params, }} - headers = {'Content-Type': 'application/json'}, - resp, content = client.request(FLAGS.keystone_ec2_url, - 'POST', - headers=headers, - body=utils.dumps(creds)) + creds_json = utils.dumps(creds) + headers = {'Content-Type': 'application/json'} + o = urlparse(FLAGS.keystone_ec2_url) + if o.scheme == "http": + conn = httplib.HTTPConnection(o.netloc) + else: + conn = httplib.HTTPSConnection(o.netloc) + conn.request('POST', o.path, body=creds_json, headers=headers) + response = conn.getresponse().read() + conn.close() + # NOTE(vish): We could save a call to keystone by # having keystone return token, tenant, # user, and roles from this call. - result = utils.loads(content) + result = utils.loads(response) # TODO(vish): check for errors - token_id = result['auth']['token']['id'] + token_id = result['auth']['token']['id'] # Authenticated! 
req.headers['X-Auth-Token'] = token_id return self.application @@ -392,18 +399,20 @@ class Executor(wsgi.Application): except exception.InstanceNotFound as ex: LOG.info(_('InstanceNotFound raised: %s'), unicode(ex), context=context) - return self._error(req, context, type(ex).__name__, ex.message) + ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_id']) + message = ex.message % {'instance_id': ec2_id} + return self._error(req, context, type(ex).__name__, message) except exception.VolumeNotFound as ex: LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_vol_id(ex.volume_id) - message = _('Volume %s not found') % ec2_id + ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id']) + message = ex.message % {'volume_id': ec2_id} return self._error(req, context, type(ex).__name__, message) except exception.SnapshotNotFound as ex: LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_snap_id(ex.snapshot_id) - message = _('Snapshot %s not found') % ec2_id + ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id']) + message = ex.message % {'snapshot_id': ec2_id} return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index dfbbc0a2b..75e029509 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. 
""" import base64 -import datetime import netaddr import urllib @@ -33,6 +32,7 @@ from nova import log as logging from nova import utils from nova.api.ec2 import ec2utils from nova.auth import manager +from nova.compute import vm_states FLAGS = flags.FLAGS @@ -273,8 +273,7 @@ class AdminController(object): """Get the VPN instance for a project ID.""" for instance in db.instance_get_all_by_project(context, project_id): if (instance['image_id'] == str(FLAGS.vpn_image_id) - and not instance['state_description'] in - ['shutting_down', 'shutdown']): + and not instance['vm_state'] in [vm_states.DELETED]): return instance def start_vpn(self, context, project): diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9aebf92e3..fe44191c8 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -47,6 +47,7 @@ from nova import utils from nova import volume from nova.api.ec2 import ec2utils from nova.compute import instance_types +from nova.compute import vm_states from nova.image import s3 @@ -78,6 +79,30 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} +# EC2 API can return the following values as documented in the EC2 API +# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ +# ApiReference-ItemType-InstanceStateType.html +# pending | running | shutting-down | terminated | stopping | stopped +_STATE_DESCRIPTION_MAP = { + None: 'pending', + vm_states.ACTIVE: 'running', + vm_states.BUILDING: 'pending', + vm_states.REBUILDING: 'pending', + vm_states.DELETED: 'terminated', + vm_states.STOPPED: 'stopped', + vm_states.MIGRATING: 'migrate', + vm_states.RESIZING: 'resize', + vm_states.PAUSED: 'pause', + vm_states.SUSPENDED: 'suspend', + vm_states.RESCUED: 'rescue', +} + + +def state_description_from_vm_state(vm_state): + """Map the vm state to the server status string""" + return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) + + # TODO(yamahata): hypervisor dependent default device name 
_DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' _DEFAULT_MAPPINGS = {'ami': 'sda1', @@ -1039,11 +1064,12 @@ class CloudController(object): def _format_attr_instance_initiated_shutdown_behavior(instance, result): - state_description = instance['state_description'] - state_to_value = {'stopping': 'stop', - 'stopped': 'stop', - 'terminating': 'terminate'} - value = state_to_value.get(state_description) + vm_state = instance['vm_state'] + state_to_value = { + vm_states.STOPPED: 'stopped', + vm_states.DELETED: 'terminated', + } + value = state_to_value.get(vm_state) if value: result['instanceInitiatedShutdownBehavior'] = value @@ -1198,8 +1224,8 @@ class CloudController(object): self._format_kernel_id(instance, i, 'kernelId') self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { - 'code': instance['state'], - 'name': instance['state_description']} + 'code': instance['power_state'], + 'name': state_description_from_vm_state(instance['vm_state'])} fixed_addr = None floating_addr = None if instance['fixed_ips']: @@ -1618,22 +1644,22 @@ class CloudController(object): # stop the instance if necessary restart_instance = False if not no_reboot: - state_description = instance['state_description'] + vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. - if state_description not in ('running', 'stopping', 'stopped'): + if vm_state not in (vm_states.ACTIVE, vm_states.STOPPED): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) - if state_description == 'running': + if vm_state == vm_states.ACTIVE: restart_instance = True self.compute_api.stop(context, instance_id=instance_id) # wait instance for really stopped start_time = time.time() - while state_description != 'stopped': + while vm_state != vm_states.STOPPED: time.sleep(1) instance = self.compute_api.get(context, instance_id) - state_description = instance['state_description'] + vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 
1 hour for now for safety. # Is it too short/long? # Or is there any better way? diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index c3379cd11..a836a584c 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -29,7 +29,8 @@ from nova import log as logging from nova import quota from nova.api.openstack import wsgi from nova.api.openstack import xmlutil -from nova.compute import power_state as compute_power_state +from nova.compute import vm_states +from nova.compute import task_states LOG = logging.getLogger('nova.api.openstack.common') @@ -40,36 +41,61 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' -_STATUS_MAP = { - None: 'BUILD', - compute_power_state.NOSTATE: 'BUILD', - compute_power_state.RUNNING: 'ACTIVE', - compute_power_state.BLOCKED: 'ACTIVE', - compute_power_state.SUSPENDED: 'SUSPENDED', - compute_power_state.PAUSED: 'PAUSED', - compute_power_state.SHUTDOWN: 'SHUTDOWN', - compute_power_state.SHUTOFF: 'SHUTOFF', - compute_power_state.CRASHED: 'ERROR', - compute_power_state.FAILED: 'ERROR', - compute_power_state.BUILDING: 'BUILD', +_STATE_MAP = { + vm_states.ACTIVE: { + 'default': 'ACTIVE', + task_states.REBOOTING: 'REBOOT', + task_states.UPDATING_PASSWORD: 'PASSWORD', + task_states.RESIZE_VERIFY: 'VERIFY_RESIZE', + }, + vm_states.BUILDING: { + 'default': 'BUILD', + }, + vm_states.REBUILDING: { + 'default': 'REBUILD', + }, + vm_states.STOPPED: { + 'default': 'STOPPED', + }, + vm_states.MIGRATING: { + 'default': 'MIGRATING', + }, + vm_states.RESIZING: { + 'default': 'RESIZE', + }, + vm_states.PAUSED: { + 'default': 'PAUSED', + }, + vm_states.SUSPENDED: { + 'default': 'SUSPENDED', + }, + vm_states.RESCUED: { + 'default': 'RESCUE', + }, + vm_states.ERROR: { + 'default': 'ERROR', + }, + vm_states.DELETED: { + 'default': 'DELETED', + }, } -def status_from_power_state(power_state): - """Map the power state to the server status string""" 
- return _STATUS_MAP[power_state] +def status_from_state(vm_state, task_state='default'): + """Given vm_state and task_state, return a status string.""" + task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE')) + status = task_map.get(task_state, task_map['default']) + LOG.debug("Generated %(status)s from vm_state=%(vm_state)s " + "task_state=%(task_state)s." % locals()) + return status -def power_states_from_status(status): - """Map the server status string to a list of power states""" - power_states = [] - for power_state, status_map in _STATUS_MAP.iteritems(): - # Skip the 'None' state - if power_state is None: - continue - if status.lower() == status_map.lower(): - power_states.append(power_state) - return power_states +def vm_state_from_status(status): + """Map the server status string to a vm state.""" + for state, task_map in _STATE_MAP.iteritems(): + status_string = task_map.get("default") + if status.lower() == status_string.lower(): + return state def get_pagination_params(request): diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 40086f778..d1add8f83 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -36,9 +36,9 @@ def _translate_floating_ip_view(floating_ip): result['fixed_ip'] = floating_ip['fixed_ip']['address'] except (TypeError, KeyError): result['fixed_ip'] = None - if 'instance' in floating_ip: - result['instance_id'] = floating_ip['instance']['id'] - else: + try: + result['instance_id'] = floating_ip['fixed_ip']['instance_id'] + except (TypeError, KeyError): result['instance_id'] = None return {'floating_ip': result} @@ -96,7 +96,8 @@ class FloatingIPController(object): except rpc.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? 
if ex.exc_type == 'NoMoreFloatingIps': - raise exception.NoMoreFloatingIps() + msg = _("No more floating ips available.") + raise webob.exc.HTTPBadRequest(explanation=msg) else: raise @@ -138,7 +139,11 @@ class Floating_ips(extensions.ExtensionDescriptor): msg = _("Address not specified") raise webob.exc.HTTPBadRequest(explanation=msg) - self.compute_api.associate_floating_ip(context, instance_id, address) + try: + self.compute_api.associate_floating_ip(context, instance_id, + address) + except exception.ApiError, e: + raise webob.exc.HTTPBadRequest(explanation=e.message) return webob.Response(status_int=202) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py new file mode 100644 index 000000000..42691a9fa --- /dev/null +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -0,0 +1,236 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import urlparse +import webob + +from datetime import datetime +from nova import exception +from nova import flags +from nova.compute import api +from nova.api.openstack import extensions +from nova.api.openstack import views +from nova.db.sqlalchemy.session import get_session +from webob import exc + + +FLAGS = flags.FLAGS + + +class SimpleTenantUsageController(object): + def _hours_for(self, instance, period_start, period_stop): + launched_at = instance['launched_at'] + terminated_at = instance['terminated_at'] + if terminated_at is not None: + if not isinstance(terminated_at, datetime): + terminated_at = datetime.strptime(terminated_at, + "%Y-%m-%d %H:%M:%S.%f") + + if launched_at is not None: + if not isinstance(launched_at, datetime): + launched_at = datetime.strptime(launched_at, + "%Y-%m-%d %H:%M:%S.%f") + + if terminated_at and terminated_at < period_start: + return 0 + # nothing if it started after the usage report ended + if launched_at and launched_at > period_stop: + return 0 + if launched_at: + # if instance launched after period_started, don't charge for first + start = max(launched_at, period_start) + if terminated_at: + # if instance stopped before period_stop, don't charge after + stop = min(period_stop, terminated_at) + else: + # instance is still running, so charge them up to current time + stop = period_stop + dt = stop - start + seconds = dt.days * 3600 * 24 + dt.seconds\ + + dt.microseconds / 100000.0 + + return seconds / 3600.0 + else: + # instance hasn't launched, so no charge + return 0 + + def _tenant_usages_for_period(self, context, period_start, + period_stop, tenant_id=None, detailed=True): + + compute_api = api.API() + instances = compute_api.get_active_by_window(context, + period_start, + period_stop, + tenant_id) + from nova import log as logging + logging.info(instances) + rval = {} + flavors = {} + + for instance in instances: + info = {} + info['hours'] = self._hours_for(instance, + period_start, + period_stop) + flavor_type = 
instance['instance_type_id'] + + if not flavors.get(flavor_type): + try: + it_ref = compute_api.get_instance_type(context, + flavor_type) + flavors[flavor_type] = it_ref + except exception.InstanceTypeNotFound: + # can't bill if there is no instance type + continue + + flavor = flavors[flavor_type] + + info['name'] = instance['display_name'] + + info['memory_mb'] = flavor['memory_mb'] + info['local_gb'] = flavor['local_gb'] + info['vcpus'] = flavor['vcpus'] + + info['tenant_id'] = instance['project_id'] + + info['flavor'] = flavor['name'] + + info['started_at'] = instance['launched_at'] + + info['ended_at'] = instance['terminated_at'] + + if info['ended_at']: + info['state'] = 'terminated' + else: + info['state'] = instance['vm_state'] + + now = datetime.utcnow() + + if info['state'] == 'terminated': + delta = info['ended_at'] - info['started_at'] + else: + delta = now - info['started_at'] + + info['uptime'] = delta.days * 24 * 60 + delta.seconds + + if not info['tenant_id'] in rval: + summary = {} + summary['tenant_id'] = info['tenant_id'] + if detailed: + summary['server_usages'] = [] + summary['total_local_gb_usage'] = 0 + summary['total_vcpus_usage'] = 0 + summary['total_memory_mb_usage'] = 0 + summary['total_hours'] = 0 + summary['start'] = period_start + summary['stop'] = period_stop + rval[info['tenant_id']] = summary + + summary = rval[info['tenant_id']] + summary['total_local_gb_usage'] += info['local_gb'] * info['hours'] + summary['total_vcpus_usage'] += info['vcpus'] * info['hours'] + summary['total_memory_mb_usage'] += info['memory_mb']\ + * info['hours'] + + summary['total_hours'] += info['hours'] + if detailed: + summary['server_usages'].append(info) + + return rval.values() + + def _parse_datetime(self, dtstr): + if isinstance(dtstr, datetime): + return dtstr + try: + return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S") + except: + try: + return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S.%f") + except: + return datetime.strptime(dtstr, "%Y-%m-%d 
%H:%M:%S.%f") + + def _get_datetime_range(self, req): + qs = req.environ.get('QUERY_STRING', '') + env = urlparse.parse_qs(qs) + period_start = self._parse_datetime(env.get('start', + [datetime.utcnow().isoformat()])[0]) + period_stop = self._parse_datetime(env.get('end', + [datetime.utcnow().isoformat()])[0]) + + detailed = bool(env.get('detailed', False)) + return (period_start, period_stop, detailed) + + def index(self, req): + """Retrive tenant_usage for all tenants""" + context = req.environ['nova.context'] + + if not context.is_admin and FLAGS.allow_admin_api: + return webob.Response(status_int=403) + + (period_start, period_stop, detailed) = self._get_datetime_range(req) + usages = self._tenant_usages_for_period(context, + period_start, + period_stop, + detailed=detailed) + return {'tenant_usages': usages} + + def show(self, req, id): + """Retrive tenant_usage for a specified tenant""" + tenant_id = id + context = req.environ['nova.context'] + + if not context.is_admin and FLAGS.allow_admin_api: + if tenant_id != context.project_id: + return webob.Response(status_int=403) + + (period_start, period_stop, ignore) = self._get_datetime_range(req) + usage = self._tenant_usages_for_period(context, + period_start, + period_stop, + tenant_id=tenant_id, + detailed=True) + if len(usage): + usage = usage[0] + else: + usage = {} + return {'tenant_usage': usage} + + +class Simple_tenant_usage(extensions.ExtensionDescriptor): + def get_name(self): + return "SimpleTenantUsage" + + def get_alias(self): + return "os-simple-tenant-usage" + + def get_description(self): + return "Simple tenant usage extension" + + def get_namespace(self): + return "http://docs.openstack.org/ext/os-simple-tenant-usage/api/v1.1" + + def get_updated(self): + return "2011-08-19T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-simple-tenant-usage', + SimpleTenantUsageController()) + resources.append(res) + + return resources diff --git 
a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py new file mode 100644 index 000000000..e09736a28 --- /dev/null +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -0,0 +1,606 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" The virtul storage array extension""" + + +from webob import exc + +from nova import vsa +from nova import volume +from nova import compute +from nova import network +from nova import db +from nova import quota +from nova import exception +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.api.openstack import wsgi +from nova.api.openstack import servers +from nova.api.openstack.contrib import volumes +from nova.compute import instance_types + +from nova import flags +FLAGS = flags.FLAGS + +LOG = logging.getLogger("nova.api.vsa") + + +def _vsa_view(context, vsa, details=False, instances=None): + """Map keys for vsa summary/detailed view.""" + d = {} + + d['id'] = vsa.get('id') + d['name'] = vsa.get('name') + d['displayName'] = vsa.get('display_name') + d['displayDescription'] = vsa.get('display_description') + + d['createTime'] = vsa.get('created_at') + d['status'] = vsa.get('status') + + if 'vsa_instance_type' in vsa: + d['vcType'] = 
vsa['vsa_instance_type'].get('name', None) + else: + d['vcType'] = vsa['instance_type_id'] + + d['vcCount'] = vsa.get('vc_count') + d['driveCount'] = vsa.get('vol_count') + + d['ipAddress'] = None + for instance in instances: + fixed_addr = None + floating_addr = None + if instance['fixed_ips']: + fixed = instance['fixed_ips'][0] + fixed_addr = fixed['address'] + if fixed['floating_ips']: + floating_addr = fixed['floating_ips'][0]['address'] + + if floating_addr: + d['ipAddress'] = floating_addr + break + else: + d['ipAddress'] = d['ipAddress'] or fixed_addr + + return d + + +class VsaController(object): + """The Virtual Storage Array API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "vsa": [ + "id", + "name", + "displayName", + "displayDescription", + "createTime", + "status", + "vcType", + "vcCount", + "driveCount", + "ipAddress", + ]}}} + + def __init__(self): + self.vsa_api = vsa.API() + self.compute_api = compute.API() + self.network_api = network.API() + super(VsaController, self).__init__() + + def _get_instances_by_vsa_id(self, context, id): + return self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(id))}) + + def _items(self, req, details): + """Return summary or detailed list of VSAs.""" + context = req.environ['nova.context'] + vsas = self.vsa_api.get_all(context) + limited_list = common.limited(vsas, req) + + vsa_list = [] + for vsa in limited_list: + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) + vsa_list.append(_vsa_view(context, vsa, details, instances)) + return {'vsaSet': vsa_list} + + def index(self, req): + """Return a short list of VSAs.""" + return self._items(req, details=False) + + def detail(self, req): + """Return a detailed list of VSAs.""" + return self._items(req, details=True) + + def show(self, req, id): + """Return data about the given VSA.""" + context = req.environ['nova.context'] + + try: + vsa = 
self.vsa_api.get(context, vsa_id=id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) + return {'vsa': _vsa_view(context, vsa, True, instances)} + + def create(self, req, body): + """Create a new VSA.""" + context = req.environ['nova.context'] + + if not body or 'vsa' not in body: + LOG.debug(_("No body provided"), context=context) + return faults.Fault(exc.HTTPUnprocessableEntity()) + + vsa = body['vsa'] + + display_name = vsa.get('displayName') + vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type) + try: + instance_type = instance_types.get_instance_type_by_name(vc_type) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"), + locals(), context=context) + + args = dict(display_name=display_name, + display_description=vsa.get('displayDescription'), + instance_type=instance_type, + storage=vsa.get('storage'), + shared=vsa.get('shared'), + availability_zone=vsa.get('placement', {}).\ + get('AvailabilityZone')) + + vsa = self.vsa_api.create(context, **args) + + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) + return {'vsa': _vsa_view(context, vsa, True, instances)} + + def delete(self, req, id): + """Delete a VSA.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete VSA with id: %s"), id, context=context) + + try: + self.vsa_api.delete(context, vsa_id=id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + def associate_address(self, req, id, body): + """ /zadr-vsa/{vsa_id}/associate_address + auto or manually associate an IP to VSA + """ + context = req.environ['nova.context'] + + if body is None: + ip = 'auto' + else: + ip = body.get('ipAddress', 'auto') + + LOG.audit(_("Associate address %(ip)s to VSA %(id)s"), + locals(), context=context) + + try: + instances = self._get_instances_by_vsa_id(context, id) + if instances is None 
or len(instances) == 0: + return faults.Fault(exc.HTTPNotFound()) + + for instance in instances: + self.network_api.allocate_for_instance(context, instance, + vpn=False) + # Placeholder + return + + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + def disassociate_address(self, req, id, body): + """ /zadr-vsa/{vsa_id}/disassociate_address + auto or manually associate an IP to VSA + """ + context = req.environ['nova.context'] + + if body is None: + ip = 'auto' + else: + ip = body.get('ipAddress', 'auto') + + LOG.audit(_("Disassociate address from VSA %(id)s"), + locals(), context=context) + # Placeholder + + +class VsaVolumeDriveController(volumes.VolumeController): + """The base class for VSA volumes & drives. + + A child resource of the VSA object. Allows operations with + volumes and drives created to/from particular VSA + + """ + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "volume": [ + "id", + "name", + "status", + "size", + "availabilityZone", + "createdAt", + "displayName", + "displayDescription", + "vsaId", + ]}}} + + def __init__(self): + self.volume_api = volume.API() + self.vsa_api = vsa.API() + super(VsaVolumeDriveController, self).__init__() + + def _translation(self, context, vol, vsa_id, details): + if details: + translation = volumes._translate_volume_detail_view + else: + translation = volumes._translate_volume_summary_view + + d = translation(context, vol) + d['vsaId'] = vsa_id + d['name'] = vol['name'] + return d + + def _check_volume_ownership(self, context, vsa_id, id): + obj = self.object + try: + volume_ref = self.volume_api.get(context, volume_id=id) + except exception.NotFound: + LOG.error(_("%(obj)s with ID %(id)s not found"), locals()) + raise + + own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref, + self.direction) + if own_vsa_id != vsa_id: + LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\ + " and not to VSA %(vsa_id)s."), locals()) + raise 
exception.Invalid() + + def _items(self, req, vsa_id, details): + """Return summary or detailed list of volumes for particular VSA.""" + context = req.environ['nova.context'] + + vols = self.volume_api.get_all(context, + search_opts={'metadata': {self.direction: str(vsa_id)}}) + limited_list = common.limited(vols, req) + + res = [self._translation(context, vol, vsa_id, details) \ + for vol in limited_list] + + return {self.objects: res} + + def index(self, req, vsa_id): + """Return a short list of volumes created from particular VSA.""" + LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals()) + return self._items(req, vsa_id, details=False) + + def detail(self, req, vsa_id): + """Return a detailed list of volumes created from particular VSA.""" + LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals()) + return self._items(req, vsa_id, details=True) + + def create(self, req, vsa_id, body): + """Create a new volume from VSA.""" + LOG.audit(_("Create. vsa_id=%(vsa_id)s, body=%(body)s"), locals()) + context = req.environ['nova.context'] + + if not body: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + vol = body[self.object] + size = vol['size'] + LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"), + locals(), context=context) + try: + # create is supported for volumes only (drives created through VSA) + volume_type = self.vsa_api.get_vsa_volume_type(context) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + new_volume = self.volume_api.create(context, + size, + None, + vol.get('displayName'), + vol.get('displayDescription'), + volume_type=volume_type, + metadata=dict(from_vsa_id=str(vsa_id))) + + return {self.object: self._translation(context, new_volume, + vsa_id, True)} + + def update(self, req, vsa_id, id, body): + """Update a volume.""" + context = req.environ['nova.context'] + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except 
exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + vol = body[self.object] + updatable_fields = [{'displayName': 'display_name'}, + {'displayDescription': 'display_description'}, + {'status': 'status'}, + {'providerLocation': 'provider_location'}, + {'providerAuth': 'provider_auth'}] + changes = {} + for field in updatable_fields: + key = field.keys()[0] + val = field[key] + if key in vol: + changes[val] = vol[key] + + obj = self.object + LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"), + locals(), context=context) + + try: + self.volume_api.update(context, volume_id=id, fields=changes) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + + def delete(self, req, vsa_id, id): + """Delete a volume.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals()) + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + return super(VsaVolumeDriveController, self).delete(req, id) + + def show(self, req, vsa_id, id): + """Return data about the given volume.""" + context = req.environ['nova.context'] + + LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals()) + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + return super(VsaVolumeDriveController, self).show(req, id) + + +class VsaVolumeController(VsaVolumeDriveController): + """The VSA volume API controller for the Openstack API. + + A child resource of the VSA object. 
Allows operations with volumes created + by particular VSA + + """ + + def __init__(self): + self.direction = 'from_vsa_id' + self.objects = 'volumes' + self.object = 'volume' + super(VsaVolumeController, self).__init__() + + +class VsaDriveController(VsaVolumeDriveController): + """The VSA Drive API controller for the Openstack API. + + A child resource of the VSA object. Allows operations with drives created + for particular VSA + + """ + + def __init__(self): + self.direction = 'to_vsa_id' + self.objects = 'drives' + self.object = 'drive' + super(VsaDriveController, self).__init__() + + def create(self, req, vsa_id, body): + """Create a new drive for VSA. Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update a drive. Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + + def delete(self, req, vsa_id, id): + """Delete a volume. Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + + +class VsaVPoolController(object): + """The vPool VSA API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "vpool": [ + "id", + "vsaId", + "name", + "displayName", + "displayDescription", + "driveCount", + "driveIds", + "protection", + "stripeSize", + "stripeWidth", + "createTime", + "status", + ]}}} + + def __init__(self): + self.vsa_api = vsa.API() + super(VsaVPoolController, self).__init__() + + def index(self, req, vsa_id): + """Return a short list of vpools created from particular VSA.""" + return {'vpools': []} + + def create(self, req, vsa_id, body): + """Create a new vPool for VSA.""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update vPool parameters.""" + return faults.Fault(exc.HTTPBadRequest()) + + def delete(self, req, vsa_id, id): + """Delete a vPool.""" + return faults.Fault(exc.HTTPBadRequest()) + + def show(self, req, 
vsa_id, id): + """Return data about the given vPool.""" + return faults.Fault(exc.HTTPBadRequest()) + + +class VsaVCController(servers.ControllerV11): + """The VSA Virtual Controller API controller for the OpenStack API.""" + + def __init__(self): + self.vsa_api = vsa.API() + self.compute_api = compute.API() + self.vsa_id = None # VP-TODO: temporary ugly hack + super(VsaVCController, self).__init__() + + def _get_servers(self, req, is_detail): + """Returns a list of servers, taking into account any search + options specified. + """ + + if self.vsa_id is None: + super(VsaVCController, self)._get_servers(req, is_detail) + + context = req.environ['nova.context'] + + search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))} + instance_list = self.compute_api.get_all( + context, search_opts=search_opts) + + limited_list = self._limit_items(instance_list, req) + servers = [self._build_view(req, inst, is_detail)['server'] + for inst in limited_list] + return dict(servers=servers) + + def index(self, req, vsa_id): + """Return list of instances for particular VSA.""" + + LOG.audit(_("Index instances for VSA %s"), vsa_id) + + self.vsa_id = vsa_id # VP-TODO: temporary ugly hack + result = super(VsaVCController, self).detail(req) + self.vsa_id = None + return result + + def create(self, req, vsa_id, body): + """Create a new instance for VSA.""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update VSA instance.""" + return faults.Fault(exc.HTTPBadRequest()) + + def delete(self, req, vsa_id, id): + """Delete VSA instance.""" + return faults.Fault(exc.HTTPBadRequest()) + + def show(self, req, vsa_id, id): + """Return data about the given instance.""" + return super(VsaVCController, self).show(req, id) + + +class Virtual_storage_arrays(extensions.ExtensionDescriptor): + + def get_name(self): + return "VSAs" + + def get_alias(self): + return "zadr-vsa" + + def get_description(self): + return "Virtual Storage Arrays support" + + def 
get_namespace(self): + return "http://docs.openstack.org/ext/vsa/api/v1.1" + + def get_updated(self): + return "2011-08-25T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'zadr-vsa', + VsaController(), + collection_actions={'detail': 'GET'}, + member_actions={'add_capacity': 'POST', + 'remove_capacity': 'POST', + 'associate_address': 'POST', + 'disassociate_address': 'POST'}) + resources.append(res) + + res = extensions.ResourceExtension('volumes', + VsaVolumeController(), + collection_actions={'detail': 'GET'}, + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + res = extensions.ResourceExtension('drives', + VsaDriveController(), + collection_actions={'detail': 'GET'}, + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + res = extensions.ResourceExtension('vpools', + VsaVPoolController(), + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + res = extensions.ResourceExtension('instances', + VsaVCController(), + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + return resources diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 483ff4985..29e071609 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -19,7 +19,6 @@ import base64 from webob import exc from xml.dom import minidom -from nova import db from nova import exception from nova import flags from nova import log as logging @@ -74,20 +73,17 @@ class CreateInstanceHelper(object): if not 'server' in body: raise exc.HTTPUnprocessableEntity() - server_dict = body['server'] context = req.environ['nova.context'] + server_dict = body['server'] password = self.controller._get_server_admin_password(server_dict) - key_name = None - key_data = None - # TODO(vish): Key pair access 
should move into a common library - # instead of being accessed directly from the db. - key_pairs = db.key_pair_get_all_by_user(context.elevated(), - context.user_id) - if key_pairs: - key_pair = key_pairs[0] - key_name = key_pair['name'] - key_data = key_pair['public_key'] + if not 'name' in server_dict: + msg = _("Server name is not defined") + raise exc.HTTPBadRequest(explanation=msg) + + name = server_dict['name'] + self._validate_server_name(name) + name = name.strip() image_href = self.controller._image_ref_from_req_data(body) # If the image href was generated by nova api, strip image_href @@ -98,7 +94,7 @@ class CreateInstanceHelper(object): try: image_service, image_id = nova.image.get_image_service(image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) + req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) assert str(image_id) in images except Exception, e: @@ -133,12 +129,13 @@ class CreateInstanceHelper(object): msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) - if not 'name' in server_dict: - msg = _("Server name is not defined") - raise exc.HTTPBadRequest(explanation=msg) - zone_blob = server_dict.get('blob') + + # optional openstack extensions: + key_name = server_dict.get('key_name') user_data = server_dict.get('user_data') + self._validate_user_data(user_data) + availability_zone = server_dict.get('availability_zone') name = server_dict['name'] self._validate_server_name(name) @@ -173,7 +170,6 @@ class CreateInstanceHelper(object): display_name=name, display_description=name, key_name=key_name, - key_data=key_data, metadata=server_dict.get('metadata', {}), access_ip_v4=server_dict.get('accessIPv4'), access_ip_v6=server_dict.get('accessIPv6'), @@ -196,6 +192,9 @@ class CreateInstanceHelper(object): except exception.FlavorNotFound as error: msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) + except 
exception.KeypairNotFound as error: + msg = _("Invalid key_name provided.") + raise exc.HTTPBadRequest(explanation=msg) except exception.SecurityGroupNotFound as error: raise exc.HTTPBadRequest(explanation=unicode(error)) except RemoteError as err: @@ -248,12 +247,12 @@ class CreateInstanceHelper(object): msg = _("Server name is an empty string") raise exc.HTTPBadRequest(explanation=msg) - def _get_kernel_ramdisk_from_image(self, req, image_id): + def _get_kernel_ramdisk_from_image(self, req, image_service, image_id): """Fetch an image from the ImageService, then if present, return the associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - image_meta = self._image_service.show(context, image_id) + image_meta = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( @@ -370,6 +369,16 @@ class CreateInstanceHelper(object): return networks + def _validate_user_data(self, user_data): + """Check if the user_data is encoded properly""" + if not user_data: + return + try: + user_data = base64.b64decode(user_data) + except TypeError: + expl = _('Userdata content cannot be decoded') + raise exc.HTTPBadRequest(explanation=expl) + class ServerXMLDeserializer(wsgi.XMLDeserializer): """ diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index bb403845d..571185169 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -22,6 +22,7 @@ from webob import exc import webob from nova import compute +from nova import db from nova import exception from nova import flags from nova import log as logging @@ -96,17 +97,23 @@ class Controller(object): search_opts['recurse_zones'] = utils.bool_from_str( search_opts.get('recurse_zones', False)) - # If search by 'status', we need to convert it to 'state' - # If the status is unknown, bail. 
- # Leave 'state' in search_opts so compute can pass it on to - # child zones.. + # If search by 'status', we need to convert it to 'vm_state' + # to pass on to child zones. if 'status' in search_opts: status = search_opts['status'] - search_opts['state'] = common.power_states_from_status(status) - if len(search_opts['state']) == 0: + state = common.vm_state_from_status(status) + if state is None: reason = _('Invalid server status: %(status)s') % locals() - LOG.error(reason) raise exception.InvalidInput(reason=reason) + search_opts['vm_state'] = state + + if 'changes-since' in search_opts: + try: + parsed = utils.parse_isotime(search_opts['changes-since']) + except ValueError: + msg = _('Invalid changes-since value') + raise exc.HTTPBadRequest(explanation=msg) + search_opts['changes-since'] = parsed # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need @@ -115,23 +122,17 @@ class Controller(object): # should return recently deleted images according to the API spec. if 'deleted' not in search_opts: - # Admin hasn't specified deleted filter if 'changes-since' not in search_opts: - # No 'changes-since', so we need to find non-deleted servers + # No 'changes-since', so we only want non-deleted servers search_opts['deleted'] = False - else: - # This is the default, but just in case.. - search_opts['deleted'] = True - instance_list = self.compute_api.get_all( - context, search_opts=search_opts) - - # FIXME(comstud): 'changes-since' is not fully implemented. Where - # should this be filtered? 
+ instance_list = self.compute_api.get_all(context, + search_opts=search_opts) limited_list = self._limit_items(instance_list, req) servers = [self._build_view(req, inst, is_detail)['server'] - for inst in limited_list] + for inst in limited_list] + return dict(servers=servers) @scheduler_api.redirect_handler @@ -144,10 +145,16 @@ class Controller(object): except exception.NotFound: raise exc.HTTPNotFound() + def _get_key_name(self, req, body): + """ Get default keypair if not set """ + raise NotImplementedError() + def create(self, req, body): """ Creates a new server for a given user """ + if 'server' in body: + body['server']['key_name'] = self._get_key_name(req, body) + extra_values = None - result = None extra_values, instances = self.helper.create_instance( req, body, self.compute_api.create) @@ -565,6 +572,13 @@ class ControllerV10(Controller): raise exc.HTTPNotFound() return webob.Response(status_int=202) + def _get_key_name(self, req, body): + context = req.environ["nova.context"] + keypairs = db.key_pair_get_all_by_user(context, + context.user_id) + if keypairs: + return keypairs[0]['name'] + def _image_ref_from_req_data(self, data): return data['server']['imageId'] @@ -609,9 +623,8 @@ class ControllerV10(Controller): try: self.compute_api.rebuild(context, instance_id, image_id, password) - except exception.BuildInProgress: - msg = _("Instance %s is currently being rebuilt.") % instance_id - LOG.debug(msg) + except exception.RebuildRequiresActiveInstance: + msg = _("Instance %s must be active to rebuild.") % instance_id raise exc.HTTPConflict(explanation=msg) return webob.Response(status_int=202) @@ -636,6 +649,10 @@ class ControllerV11(Controller): except exception.NotFound: raise exc.HTTPNotFound() + def _get_key_name(self, req, body): + if 'server' in body: + return body['server'].get('key_name') + def _image_ref_from_req_data(self, data): try: return data['server']['imageRef'] @@ -751,9 +768,8 @@ class ControllerV11(Controller): 
self.compute_api.rebuild(context, instance_id, image_href, password, name=name, metadata=metadata, files_to_inject=personalities) - except exception.BuildInProgress: - msg = _("Instance %s is currently being rebuilt.") % instance_id - LOG.debug(msg) + except exception.RebuildRequiresActiveInstance: + msg = _("Instance %s must be active to rebuild.") % instance_id raise exc.HTTPConflict(explanation=msg) except exception.InstanceNotFound: msg = _("Instance %s could not be found") % instance_id diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 0ec98591e..3a13d15f1 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -21,13 +21,12 @@ import hashlib import os from nova import exception -import nova.compute -import nova.context from nova.api.openstack import common from nova.api.openstack.views import addresses as addresses_view from nova.api.openstack.views import flavors as flavors_view from nova.api.openstack.views import images as images_view from nova import utils +from nova.compute import vm_states class ViewBuilder(object): @@ -61,17 +60,13 @@ class ViewBuilder(object): def _build_detail(self, inst): """Returns a detailed model of a server.""" + vm_state = inst.get('vm_state', vm_states.BUILDING) + task_state = inst.get('task_state') inst_dict = { 'id': inst['id'], 'name': inst['display_name'], - 'status': common.status_from_power_state(inst.get('state'))} - - ctxt = nova.context.get_admin_context() - compute_api = nova.compute.API() - - if compute_api.has_finished_migration(ctxt, inst['uuid']): - inst_dict['status'] = 'RESIZE-CONFIRM' + 'status': common.status_from_state(vm_state, task_state)} # Return the metadata as a dictionary metadata = {} @@ -188,6 +183,7 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) response['uuid'] = inst['uuid'] + response['key_name'] = inst.get('key_name', '') 
self._build_config_drive(response, inst) def _build_links(self, response, inst): diff --git a/nova/compute/api.py b/nova/compute/api.py index 60a13631a..e045ef3de 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -19,13 +19,11 @@ """Handles all requests relating to instances (guest vms).""" -import eventlet import novaclient import re import time from nova import block_device -from nova import db from nova import exception from nova import flags import nova.image @@ -37,6 +35,8 @@ from nova import utils from nova import volume from nova.compute import instance_types from nova.compute import power_state +from nova.compute import task_states +from nova.compute import vm_states from nova.compute.utils import terminate_volumes from nova.scheduler import api as scheduler_api from nova.db import base @@ -75,12 +75,18 @@ def generate_default_hostname(instance): def _is_able_to_shutdown(instance, instance_id): - states = {'terminating': "Instance %s is already being terminated", - 'migrating': "Instance %s is being migrated", - 'stopping': "Instance %s is being stopped"} - msg = states.get(instance['state_description']) - if msg: - LOG.warning(_(msg), instance_id) + vm_state = instance["vm_state"] + task_state = instance["task_state"] + + valid_shutdown_states = [ + vm_states.ACTIVE, + vm_states.REBUILDING, + vm_states.BUILDING, + ] + + if vm_state not in valid_shutdown_states: + LOG.warn(_("Instance %(instance_id)s is not in an 'active' state. It " + "is currently %(vm_state)s. 
Shutdown aborted.") % locals()) return False return True @@ -237,7 +243,7 @@ class API(base.Base): self.ensure_default_security_group(context) if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) + key_pair = self.db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] if reservation_id is None: @@ -251,10 +257,10 @@ class API(base.Base): 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', + 'power_state': power_state.NOSTATE, + 'vm_state': vm_states.BUILDING, 'config_drive_id': config_drive_id or '', 'config_drive': config_drive or '', - 'state': 0, - 'state_description': 'scheduling', 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), @@ -389,9 +395,9 @@ class API(base.Base): security_groups = [] for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) security_groups.append(group['id']) for security_group_id in security_groups: @@ -415,6 +421,8 @@ class API(base.Base): updates['display_name'] = "Server %s" % instance_id instance['display_name'] = updates['display_name'] updates['hostname'] = self.hostname_factory(instance) + updates['vm_state'] = vm_states.BUILDING + updates['task_state'] = task_states.SCHEDULING instance = self.update(context, instance_id, **updates) return instance @@ -551,8 +559,9 @@ class API(base.Base): def has_finished_migration(self, context, instance_uuid): """Returns true if an instance has a finished migration.""" try: - db.migration_get_by_instance_and_status(context, instance_uuid, - 'finished') + self.db.migration_get_by_instance_and_status(context, + instance_uuid, + 'finished') return True except exception.NotFound: return False @@ -566,14 +575,15 @@ class 
API(base.Base): :param context: the security context """ try: - db.security_group_get_by_name(context, context.project_id, - 'default') + self.db.security_group_get_by_name(context, + context.project_id, + 'default') except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} - db.security_group_create(context, values) + self.db.security_group_create(context, values) def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group.""" @@ -638,7 +648,7 @@ class API(base.Base): """Called when a rule is added to or removed from a security_group""" hosts = [x['host'] for (x, idx) - in db.service_get_all_compute_sorted(context)] + in self.db.service_get_all_compute_sorted(context)] for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -666,11 +676,11 @@ class API(base.Base): def add_security_group(self, context, instance_id, security_group_name): """Add security group to the instance""" - security_group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + security_group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) # check if the server exists - inst = db.instance_get(context, instance_id) + inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server if self._is_security_group_associated_with_server(security_group, instance_id): @@ -682,21 +692,21 @@ class API(base.Base): if inst['state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_id) - db.instance_add_security_group(context.elevated(), - instance_id, - security_group['id']) + self.db.instance_add_security_group(context.elevated(), + instance_id, + security_group['id']) rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + 
self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group['id']}}) def remove_security_group(self, context, instance_id, security_group_name): """Remove the security group associated with the instance""" - security_group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + security_group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) # check if the server exists - inst = db.instance_get(context, instance_id) + inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server if not self._is_security_group_associated_with_server(security_group, instance_id): @@ -708,11 +718,11 @@ class API(base.Base): if inst['state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_id) - db.instance_remove_security_group(context.elevated(), - instance_id, - security_group['id']) + self.db.instance_remove_security_group(context.elevated(), + instance_id, + security_group['id']) rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group['id']}}) @@ -750,10 +760,8 @@ class API(base.Base): return self.update(context, - instance['id'], - state_description='terminating', - state=0, - terminated_at=utils.utcnow()) + instance_id, + task_state=task_states.DELETING) host = instance['host'] if host: @@ -773,9 +781,9 @@ class API(base.Base): return self.update(context, - instance['id'], - state_description='stopping', - state=power_state.NOSTATE, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=task_states.STOPPING, terminated_at=utils.utcnow()) host = instance['host'] @@ -787,12 +795,18 @@ class API(base.Base): """Start an instance.""" LOG.debug(_("Going to try 
to start %s"), instance_id) instance = self._get_instance(context, instance_id, 'starting') - if instance['state_description'] != 'stopped': - _state_description = instance['state_description'] + vm_state = instance["vm_state"] + + if vm_state != vm_states.STOPPED: LOG.warning(_("Instance %(instance_id)s is not " - "stopped(%(_state_description)s)") % locals()) + "stopped. (%(vm_state)s)") % locals()) return + self.update(context, + instance_id, + vm_state=vm_states.STOPPED, + task_state=task_states.STARTING) + # TODO(yamahata): injected_files isn't supported right now. # It is used only for osapi. not for ec2 api. # availability_zone isn't used by run_instance. @@ -802,6 +816,15 @@ class API(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) + def get_active_by_window(self, context, begin, end=None, project_id=None): + """Get instances that were continuously active over a window.""" + return self.db.instance_get_active_by_window(context, begin, end, + project_id) + + def get_instance_type(self, context, instance_type_id): + """Get an instance type by instance type id.""" + return self.db.instance_type_get(context, instance_type_id) + def get(self, context, instance_id): """Get a single instance with the given instance_id.""" # NOTE(sirp): id used to be exclusively integer IDs; now we're @@ -1001,7 +1024,7 @@ class API(base.Base): :param extra_properties: dict of extra image properties to include """ - instance = db.api.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), 'image_state': 'creating', @@ -1020,32 +1043,39 @@ class API(base.Base): @scheduler_api.reroute_compute("reboot") def reboot(self, context, instance_id): """Reboot the given instance.""" + self.update(context, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=task_states.REBOOTING) self._cast_compute_message('reboot_instance', context, 
instance_id) @scheduler_api.reroute_compute("rebuild") def rebuild(self, context, instance_id, image_href, admin_password, name=None, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" - instance = db.api.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) + name = name or instance["display_name"] - if instance["state"] == power_state.BUILDING: - msg = _("Instance already building") - raise exception.BuildInProgress(msg) + if instance["vm_state"] != vm_states.ACTIVE: + msg = _("Instance must be active to rebuild.") + raise exception.RebuildRequiresActiveInstance(msg) files_to_inject = files_to_inject or [] + metadata = metadata or {} + self._check_injected_file_quota(context, files_to_inject) + self._check_metadata_properties_quota(context, metadata) - values = {} - if metadata is not None: - self._check_metadata_properties_quota(context, metadata) - values['metadata'] = metadata - if name is not None: - values['display_name'] = name - self.db.instance_update(context, instance_id, values) + self.update(context, + instance_id, + metadata=metadata, + display_name=name, + image_ref=image_href, + vm_state=vm_states.ACTIVE, + task_state=task_states.REBUILDING) rebuild_params = { "new_pass": admin_password, - "image_ref": image_href, "injected_files": files_to_inject, } @@ -1066,6 +1096,11 @@ class API(base.Base): raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') + self.update(context, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=None) + params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance_ref['uuid'], @@ -1086,6 +1121,12 @@ class API(base.Base): if not migration_ref: raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') + + self.update(context, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=None) + params = {'migration_id': 
migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance_ref['uuid'], @@ -1131,6 +1172,11 @@ class API(base.Base): if (current_memory_mb == new_memory_mb) and flavor_id: raise exception.CannotResizeToSameSize() + self.update(context, + instance_id, + vm_state=vm_states.RESIZING, + task_state=task_states.RESIZE_PREP) + instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, {"method": "prep_resize", @@ -1164,11 +1210,19 @@ class API(base.Base): @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): """Pause the given instance.""" + self.update(context, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=task_states.PAUSING) self._cast_compute_message('pause_instance', context, instance_id) @scheduler_api.reroute_compute("unpause") def unpause(self, context, instance_id): """Unpause the given instance.""" + self.update(context, + instance_id, + vm_state=vm_states.PAUSED, + task_state=task_states.UNPAUSING) self._cast_compute_message('unpause_instance', context, instance_id) def _call_compute_message_for_host(self, action, context, host, params): @@ -1201,21 +1255,37 @@ class API(base.Base): @scheduler_api.reroute_compute("suspend") def suspend(self, context, instance_id): """Suspend the given instance.""" + self.update(context, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=task_states.SUSPENDING) self._cast_compute_message('suspend_instance', context, instance_id) @scheduler_api.reroute_compute("resume") def resume(self, context, instance_id): """Resume the given instance.""" + self.update(context, + instance_id, + vm_state=vm_states.SUSPENDED, + task_state=task_states.RESUMING) self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") def rescue(self, context, instance_id): """Rescue the given instance.""" + self.update(context, + instance_id, + vm_state=vm_states.ACTIVE, + 
task_state=task_states.RESCUING) self._cast_compute_message('rescue_instance', context, instance_id) @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): """Unrescue the given instance.""" + self.update(context, + instance_id, + vm_state=vm_states.RESCUED, + task_state=task_states.UNRESCUING) self._cast_compute_message('unrescue_instance', context, instance_id) @scheduler_api.reroute_compute("set_admin_password") diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ade15e310..0477db745 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -56,6 +56,8 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state +from nova.compute import task_states +from nova.compute import vm_states from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -146,6 +148,10 @@ class ComputeManager(manager.SchedulerDependentManager): super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) + def _instance_update(self, context, instance_id, **kwargs): + """Update an instance in the database using kwargs as value.""" + return self.db.instance_update(context, instance_id, kwargs) + def init_host(self): """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) @@ -153,8 +159,8 @@ class ComputeManager(manager.SchedulerDependentManager): instances = self.db.instance_get_all_by_host(context, self.host) for instance in instances: inst_name = instance['name'] - db_state = instance['state'] - drv_state = self._update_state(context, instance['id']) + db_state = instance['power_state'] + drv_state = self._get_power_state(context, instance) expect_running = db_state == power_state.RUNNING \ and drv_state != db_state @@ -177,34 +183,13 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.warning(_('Hypervisor driver does not ' 'support firewall rules')) - def 
_update_state(self, context, instance_id, state=None): - """Update the state of an instance from the driver info.""" - instance_ref = self.db.instance_get(context, instance_id) - - if state is None: - try: - LOG.debug(_('Checking state of %s'), instance_ref['name']) - info = self.driver.get_info(instance_ref['name']) - except exception.NotFound: - info = None - - if info is not None: - state = info['state'] - else: - state = power_state.FAILED - - self.db.instance_set_state(context, instance_id, state) - return state - - def _update_launched_at(self, context, instance_id, launched_at=None): - """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or utils.utcnow()} - self.db.instance_update(context, instance_id, data) - - def _update_image_ref(self, context, instance_id, image_ref): - """Update the image_id for the given instance.""" - data = {'image_ref': image_ref} - self.db.instance_update(context, instance_id, data) + def _get_power_state(self, context, instance): + """Retrieve the power state for the given instance.""" + LOG.debug(_('Checking state of %s'), instance['name']) + try: + return self.driver.get_info(instance['name'])["state"] + except exception.NotFound: + return power_state.FAILED def get_console_topic(self, context, **kwargs): """Retrieves the console host for a project on this host. 
@@ -256,11 +241,6 @@ class ComputeManager(manager.SchedulerDependentManager): def _setup_block_device_mapping(self, context, instance_id): """setup volumes for block device mapping""" - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'block_device_mapping') - volume_api = volume.API() block_device_mapping = [] swap = None @@ -394,17 +374,12 @@ class ComputeManager(manager.SchedulerDependentManager): updates = {} updates['host'] = self.host updates['launched_on'] = self.host - instance = self.db.instance_update(context, - instance_id, - updates) + updates['vm_state'] = vm_states.BUILDING + updates['task_state'] = task_states.NETWORKING + instance = self.db.instance_update(context, instance_id, updates) instance['injected_files'] = kwargs.get('injected_files', []) instance['admin_pass'] = kwargs.get('admin_password', None) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'networking') - is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id) try: # NOTE(vish): This could be a cast because we don't do anything @@ -423,6 +398,11 @@ class ComputeManager(manager.SchedulerDependentManager): # all vif creation and network injection, maybe this is correct network_info = [] + self._instance_update(context, + instance_id, + vm_state=vm_states.BUILDING, + task_state=task_states.BLOCK_DEVICE_MAPPING) + (swap, ephemerals, block_device_mapping) = self._setup_block_device_mapping( context, instance_id) @@ -432,9 +412,12 @@ class ComputeManager(manager.SchedulerDependentManager): 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} - # TODO(vish) check to make sure the availability zone matches - self._update_state(context, instance_id, power_state.BUILDING) + self._instance_update(context, + instance_id, + vm_state=vm_states.BUILDING, + task_state=task_states.SPAWNING) + # TODO(vish) check to make sure the availability zone matches try: self.driver.spawn(context, instance, network_info, 
block_device_info) @@ -443,13 +426,21 @@ class ComputeManager(manager.SchedulerDependentManager): "virtualization enabled in the BIOS? Details: " "%(ex)s") % locals() LOG.exception(msg) + return + + current_power_state = self._get_power_state(context, instance) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=None, + launched_at=utils.utcnow()) - self._update_launched_at(context, instance_id) - self._update_state(context, instance_id) usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.create', notifier.INFO, usage_info) + except exception.InstanceNotFound: # FIXME(wwolf): We are just ignoring InstanceNotFound # exceptions here in case the instance was immediately @@ -485,8 +476,7 @@ class ComputeManager(manager.SchedulerDependentManager): for volume in volumes: self._detach_volume(context, instance_id, volume['id'], False) - if (instance['state'] == power_state.SHUTOFF and - instance['state_description'] != 'stopped'): + if instance['power_state'] == power_state.SHUTOFF: self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) @@ -501,9 +491,14 @@ class ComputeManager(manager.SchedulerDependentManager): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') instance = self.db.instance_get(context.elevated(), instance_id) + self._instance_update(context, + instance_id, + vm_state=vm_states.DELETED, + task_state=None, + terminated_at=utils.utcnow()) - # TODO(ja): should we keep it in a terminated state for a bit? 
self.db.instance_destroy(context, instance_id) + usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.delete', @@ -514,7 +509,10 @@ class ComputeManager(manager.SchedulerDependentManager): def stop_instance(self, context, instance_id): """Stopping an instance on this host.""" self._shutdown_instance(context, instance_id, 'Stopping') - # instance state will be updated to stopped by _poll_instance_states() + self._instance_update(context, + instance_id, + vm_state=vm_states.STOPPED, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock @@ -526,7 +524,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: `nova.RequestContext` object :param instance_id: Instance identifier (integer) - :param image_ref: Image identifier (href or integer) + :param injected_files: Files to inject :param new_pass: password to set on rebuilt instance """ context = context.elevated() @@ -534,29 +532,46 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Rebuilding instance %s"), instance_id, context=context) - self._update_state(context, instance_id, power_state.BUILDING) + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.REBUILDING, + task_state=None) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.destroy(instance_ref, network_info) - image_ref = kwargs.get('image_ref') - instance_ref.image_ref = image_ref + + self._instance_update(context, + instance_id, + vm_state=vm_states.REBUILDING, + task_state=task_states.BLOCK_DEVICE_MAPPING) + instance_ref.injected_files = kwargs.get('injected_files', []) network_info = self.network_api.get_instance_nw_info(context, instance_ref) bd_mapping = 
self._setup_block_device_mapping(context, instance_id) + self._instance_update(context, + instance_id, + vm_state=vm_states.REBUILDING, + task_state=task_states.SPAWNING) + # pull in new password here since the original password isn't in the db instance_ref.admin_pass = kwargs.get('new_pass', utils.generate_password(FLAGS.password_length)) self.driver.spawn(context, instance_ref, network_info, bd_mapping) - self._update_image_ref(context, instance_id, image_ref) - self._update_launched_at(context, instance_id) - self._update_state(context, instance_id) - usage_info = utils.usage_from_instance(instance_ref, - image_ref=image_ref) + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=None, + launched_at=utils.utcnow()) + + usage_info = utils.usage_from_instance(instance_ref) notifier.notify('compute.%s' % self.host, 'compute.instance.rebuild', notifier.INFO, @@ -566,26 +581,34 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this host.""" + LOG.audit(_("Rebooting instance %s"), instance_id, context=context) context = context.elevated() - self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("Rebooting instance %s"), instance_id, context=context) - if instance_ref['state'] != power_state.RUNNING: - state = instance_ref['state'] + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=task_states.REBOOTING) + + if instance_ref['power_state'] != power_state.RUNNING: + state = instance_ref['power_state'] running = power_state.RUNNING LOG.warn(_('trying to reboot a non-running ' 'instance: %(instance_id)s (state: %(state)s ' 'expected: 
%(running)s)') % locals(), context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'rebooting') network_info = self._get_instance_nw_info(context, instance_ref) self.driver.reboot(instance_ref, network_info) - self._update_state(context, instance_id) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def snapshot_instance(self, context, instance_id, image_id, @@ -601,37 +624,45 @@ class ComputeManager(manager.SchedulerDependentManager): :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ + if image_type == "snapshot": + task_state = task_states.IMAGE_SNAPSHOT + elif image_type == "backup": + task_state = task_states.IMAGE_BACKUP + else: + raise Exception(_('Image type not recognized %s') % image_type) + context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - #NOTE(sirp): update_state currently only refreshes the state field - # if we add is_snapshotting, we will need this refreshed too, - # potentially? 
- self._update_state(context, instance_id) + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=task_state) LOG.audit(_('instance %s: snapshotting'), instance_id, context=context) - if instance_ref['state'] != power_state.RUNNING: - state = instance_ref['state'] + + if instance_ref['power_state'] != power_state.RUNNING: + state = instance_ref['power_state'] running = power_state.RUNNING LOG.warn(_('trying to snapshot a non-running ' 'instance: %(instance_id)s (state: %(state)s ' 'expected: %(running)s)') % locals()) self.driver.snapshot(context, instance_ref, image_id) + self._instance_update(context, instance_id, task_state=None) + + if image_type == 'snapshot' and rotation: + raise exception.ImageRotationNotAllowed() + + elif image_type == 'backup' and rotation: + instance_uuid = instance_ref['uuid'] + self.rotate_backups(context, instance_uuid, backup_type, rotation) - if image_type == 'snapshot': - if rotation: - raise exception.ImageRotationNotAllowed() elif image_type == 'backup': - if rotation: - instance_uuid = instance_ref['uuid'] - self.rotate_backups(context, instance_uuid, backup_type, - rotation) - else: - raise exception.RotationRequiredForBackup() - else: - raise Exception(_('Image type not recognized %s') % image_type) + raise exception.RotationRequiredForBackup() def rotate_backups(self, context, instance_uuid, backup_type, rotation): """Delete excess backups associated to an instance. 
@@ -699,7 +730,7 @@ class ComputeManager(manager.SchedulerDependentManager): for i in xrange(max_tries): instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref["id"] - instance_state = instance_ref["state"] + instance_state = instance_ref["power_state"] expected_state = power_state.RUNNING if instance_state != expected_state: @@ -734,7 +765,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] - instance_state = instance_ref['state'] + instance_state = instance_ref['power_state'] expected_state = power_state.RUNNING if instance_state != expected_state: LOG.warn(_('trying to inject a file into a non-running ' @@ -752,7 +783,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] - instance_state = instance_ref['state'] + instance_state = instance_ref['power_state'] expected_state = power_state.RUNNING if instance_state != expected_state: LOG.warn(_('trying to update agent on a non-running ' @@ -767,40 +798,41 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this host.""" + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: rescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'rescuing') - _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.rescue(context, instance_ref, _update_state, network_info) - self._update_state(context, instance_id) + + # NOTE(blamar): 
None of the virt drivers use the 'callback' param + self.driver.rescue(context, instance_ref, None, network_info) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + vm_state=vm_states.RESCUED, + task_state=None, + power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unrescue_instance(self, context, instance_id): """Rescue an instance on this host.""" + LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'unrescuing') - _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.unrescue(instance_ref, _update_state, network_info) - self._update_state(context, instance_id) - @staticmethod - def _update_state_callback(self, context, instance_id, result): - """Update instance state when async task completes.""" - self._update_state(context, instance_id) + # NOTE(blamar): None of the virt drivers use the 'callback' param + self.driver.unrescue(instance_ref, None, network_info) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=None, + power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock @@ -859,11 +891,12 @@ class ComputeManager(manager.SchedulerDependentManager): # Just roll back the record. 
There's no need to resize down since # the 'old' VM already has the preferred attributes - self.db.instance_update(context, instance_ref['uuid'], - dict(memory_mb=instance_type['memory_mb'], - vcpus=instance_type['vcpus'], - local_gb=instance_type['local_gb'], - instance_type_id=instance_type['id'])) + self._instance_update(context, + instance_ref["uuid"], + memory_mb=instance_type['memory_mb'], + vcpus=instance_type['vcpus'], + local_gb=instance_type['local_gb'], + instance_type_id=instance_type['id']) self.driver.revert_migration(instance_ref) self.db.migration_update(context, migration_id, @@ -890,8 +923,11 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get_by_uuid(context, instance_id) if instance_ref['host'] == FLAGS.host: - raise exception.Error(_( - 'Migration error: destination same as source!')) + self._instance_update(context, + instance_id, + vm_state=vm_states.ERROR) + msg = _('Migration error: destination same as source!') + raise exception.Error(msg) old_instance_type = self.db.instance_type_get(context, instance_ref['instance_type_id']) @@ -985,6 +1021,11 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.finish_migration(context, instance_ref, disk_info, network_info, resize_instance) + self._instance_update(context, + instance_id, + vm_state=vm_states.ACTIVE, + task_state=task_states.RESIZE_VERIFY) + self.db.migration_update(context, migration_id, {'status': 'finished', }) @@ -1016,35 +1057,35 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this host.""" + LOG.audit(_('instance %s: pausing'), instance_id, context=context) context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: pausing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'pausing') - 
self.driver.pause(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.pause(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.PAUSED, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this host.""" + LOG.audit(_('instance %s: unpausing'), instance_id, context=context) context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unpausing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'unpausing') - self.driver.unpause(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.unpause(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def host_power_action(self, context, host=None, action=None): @@ -1060,7 +1101,7 @@ class ComputeManager(manager.SchedulerDependentManager): def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for an instance on this host.""" instance_ref = self.db.instance_get(context, instance_id) - if instance_ref["state"] == power_state.RUNNING: + if instance_ref["power_state"] == power_state.RUNNING: LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, context=context) return self.driver.get_diagnostics(instance_ref) @@ -1069,33 +1110,35 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def 
suspend_instance(self, context, instance_id): """Suspend the given instance.""" + LOG.audit(_('instance %s: suspending'), instance_id, context=context) context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: suspending'), instance_id, context=context) - self.db.instance_set_state(context, instance_id, - power_state.NOSTATE, - 'suspending') - self.driver.suspend(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.suspend(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.SUSPENDED, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def resume_instance(self, context, instance_id): """Resume the given suspended instance.""" + LOG.audit(_('instance %s: resuming'), instance_id, context=context) context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: resuming'), instance_id, context=context) - self.db.instance_set_state(context, instance_id, - power_state.NOSTATE, - 'resuming') - self.driver.resume(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.resume(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def lock_instance(self, context, instance_id): @@ -1506,11 +1549,14 @@ class ComputeManager(manager.SchedulerDependentManager): 'block_migration': block_migration}}) # Restore instance state - self.db.instance_update(ctxt, - instance_ref['id'], - 
{'state_description': 'running', - 'state': power_state.RUNNING, - 'host': dest}) + current_power_state = self._get_power_state(ctxt, instance_ref) + self._instance_update(ctxt, + instance_ref["id"], + host=dest, + power_state=current_power_state, + vm_state=vm_states.ACTIVE, + task_state=None) + # Restore volume state for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] @@ -1556,11 +1602,11 @@ class ComputeManager(manager.SchedulerDependentManager): This param specifies destination host. """ host = instance_ref['host'] - self.db.instance_update(context, - instance_ref['id'], - {'state_description': 'running', - 'state': power_state.RUNNING, - 'host': host}) + self._instance_update(context, + instance_ref['id'], + host=host, + vm_state=vm_states.ACTIVE, + task_state=None) for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] @@ -1608,10 +1654,9 @@ class ComputeManager(manager.SchedulerDependentManager): error_list.append(ex) try: - self._poll_instance_states(context) + self._sync_power_states(context) except Exception as ex: - LOG.warning(_("Error during instance poll: %s"), - unicode(ex)) + LOG.warning(_("Error during power_state sync: %s"), unicode(ex)) error_list.append(ex) return error_list @@ -1626,68 +1671,40 @@ class ComputeManager(manager.SchedulerDependentManager): self.update_service_capabilities( self.driver.get_host_stats(refresh=True)) - def _poll_instance_states(self, context): + def _sync_power_states(self, context): + """Align power states between the database and the hypervisor. + + The hypervisor is authoritative for the power_state data, so we + simply loop over all known instances for this host and update the + power_state according to the hypervisor. If the instance is not found + then it will be set to power_state.NOSTATE, because it doesn't exist + on the hypervisor. 
+ + """ vm_instances = self.driver.list_instances_detail() vm_instances = dict((vm.name, vm) for vm in vm_instances) + db_instances = self.db.instance_get_all_by_host(context, self.host) - # Keep a list of VMs not in the DB, cross them off as we find them - vms_not_found_in_db = list(vm_instances.keys()) + num_vm_instances = len(vm_instances) + num_db_instances = len(db_instances) - db_instances = self.db.instance_get_all_by_host(context, self.host) + if num_vm_instances != num_db_instances: + LOG.info(_("Found %(num_db_instances)s in the database and " + "%(num_vm_instances)s on the hypervisor.") % locals()) for db_instance in db_instances: - name = db_instance['name'] - db_state = db_instance['state'] + name = db_instance["name"] + db_power_state = db_instance['power_state'] vm_instance = vm_instances.get(name) if vm_instance is None: - # NOTE(justinsb): We have to be very careful here, because a - # concurrent operation could be in progress (e.g. a spawn) - if db_state == power_state.BUILDING: - # TODO(justinsb): This does mean that if we crash during a - # spawn, the machine will never leave the spawning state, - # but this is just the way nova is; this function isn't - # trying to correct that problem. - # We could have a separate task to correct this error. - # TODO(justinsb): What happens during a live migration? - LOG.info(_("Found instance '%(name)s' in DB but no VM. " - "State=%(db_state)s, so assuming spawn is in " - "progress.") % locals()) - vm_state = db_state - else: - LOG.info(_("Found instance '%(name)s' in DB but no VM. 
" - "State=%(db_state)s, so setting state to " - "shutoff.") % locals()) - vm_state = power_state.SHUTOFF - if db_instance['state_description'] == 'stopping': - self.db.instance_stop(context, db_instance['id']) - continue + vm_power_state = power_state.NOSTATE else: - vm_state = vm_instance.state - vms_not_found_in_db.remove(name) - - if (db_instance['state_description'] in ['migrating', 'stopping']): - # A situation which db record exists, but no instance" - # sometimes occurs while live-migration at src compute, - # this case should be ignored. - LOG.debug(_("Ignoring %(name)s, as it's currently being " - "migrated.") % locals()) - continue - - if vm_state != db_state: - LOG.info(_("DB/VM state mismatch. Changing state from " - "'%(db_state)s' to '%(vm_state)s'") % locals()) - self._update_state(context, db_instance['id'], vm_state) + vm_power_state = vm_instance.state - # NOTE(justinsb): We no longer auto-remove SHUTOFF instances - # It's quite hard to get them back when we do. - - # Are there VMs not in the DB? - for vm_not_found_in_db in vms_not_found_in_db: - name = vm_not_found_in_db + if vm_power_state == db_power_state: + continue - # We only care about instances that compute *should* know about - if name.startswith("instance-"): - # TODO(justinsb): What to do here? Adopt it? Shut it down? - LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring") - % locals()) + self._instance_update(context, + db_instance["id"], + power_state=vm_power_state) diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py new file mode 100644 index 000000000..e3315a542 --- /dev/null +++ b/nova/compute/task_states.py @@ -0,0 +1,59 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible task states for instances. + +Compute instance task states represent what is happening to the instance at the +current moment. These tasks can be generic, such as 'spawning', or specific, +such as 'block_device_mapping'. These task states allow for a better view into +what an instance is doing and should be displayed to users/administrators as +necessary. + +""" + +SCHEDULING = 'scheduling' +BLOCK_DEVICE_MAPPING = 'block_device_mapping' +NETWORKING = 'networking' +SPAWNING = 'spawning' + +IMAGE_SNAPSHOT = 'image_snapshot' +IMAGE_BACKUP = 'image_backup' + +UPDATING_PASSWORD = 'updating_password' + +RESIZE_PREP = 'resize_prep' +RESIZE_MIGRATING = 'resize_migrating' +RESIZE_MIGRATED = 'resize_migrated' +RESIZE_FINISH = 'resize_finish' +RESIZE_REVERTING = 'resize_reverting' +RESIZE_CONFIRMING = 'resize_confirming' +RESIZE_VERIFY = 'resize_verify' + +REBUILDING = 'rebuilding' + +REBOOTING = 'rebooting' +PAUSING = 'pausing' +UNPAUSING = 'unpausing' +SUSPENDING = 'suspending' +RESUMING = 'resuming' + +RESCUING = 'rescuing' +UNRESCUING = 'unrescuing' + +DELETING = 'deleting' +STOPPING = 'stopping' +STARTING = 'starting' diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py new file mode 100644 index 000000000..6f16c1f09 --- /dev/null +++ b/nova/compute/vm_states.py @@ -0,0 +1,39 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible vm states for instances. + +Compute instance vm states represent the state of an instance as it pertains to +a user or administrator. When combined with task states (task_states.py), a +better picture can be formed regarding the instance's health. + +""" + +ACTIVE = 'active' +BUILDING = 'building' +REBUILDING = 'rebuilding' + +PAUSED = 'paused' +SUSPENDED = 'suspended' +RESCUED = 'rescued' +DELETED = 'deleted' +STOPPED = 'stopped' + +MIGRATING = 'migrating' +RESIZING = 'resizing' + +ERROR = 'error' diff --git a/nova/context.py b/nova/context.py index b917a1d81..5c22641a0 100644 --- a/nova/context.py +++ b/nova/context.py @@ -38,7 +38,7 @@ class RequestContext(object): self.roles = roles or [] self.is_admin = is_admin if self.is_admin is None: - self.admin = 'admin' in self.roles + self.is_admin = 'admin' in [x.lower() for x in self.roles] self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: diff --git a/nova/db/api.py b/nova/db/api.py index 3bb9b4970..148887635 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -49,7 +49,8 @@ flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', 'Template string to be used to generate snapshot names') - +flags.DEFINE_string('vsa_name_template', 'vsa-%08x', + 'Template string to be used to generate VSA names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') @@ -495,9 +496,20 @@ def 
instance_get_all_by_filters(context, filters): return IMPL.instance_get_all_by_filters(context, filters) -def instance_get_active_by_window(context, begin, end=None): - """Get instances active during a certain time window.""" - return IMPL.instance_get_active_by_window(context, begin, end) +def instance_get_active_by_window(context, begin, end=None, project_id=None): + """Get instances active during a certain time window. + + Specifying a project_id will filter for a certain project.""" + return IMPL.instance_get_active_by_window(context, begin, end, project_id) + + +def instance_get_active_by_window_joined(context, begin, end=None, + project_id=None): + """Get instances and joins active during a certain time window. + + Specifying a project_id will filter for a certain project.""" + return IMPL.instance_get_active_by_window_joined(context, begin, end, + project_id) def instance_get_all_by_user(context, user_id): @@ -1512,3 +1524,36 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, key/value pairs specified in the extra specs dict argument""" IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id, extra_specs) + + +#################### + + +def vsa_create(context, values): + """Creates Virtual Storage Array record.""" + return IMPL.vsa_create(context, values) + + +def vsa_update(context, vsa_id, values): + """Updates Virtual Storage Array record.""" + return IMPL.vsa_update(context, vsa_id, values) + + +def vsa_destroy(context, vsa_id): + """Deletes Virtual Storage Array record.""" + return IMPL.vsa_destroy(context, vsa_id) + + +def vsa_get(context, vsa_id): + """Get Virtual Storage Array record by ID.""" + return IMPL.vsa_get(context, vsa_id) + + +def vsa_get_all(context): + """Get all Virtual Storage Array records.""" + return IMPL.vsa_get_all(context) + + +def vsa_get_all_by_project(context, project_id): + """Get all Virtual Storage Array records by project ID.""" + return IMPL.vsa_get_all_by_project(context, project_id) 
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d1fbf8cab..b99667afc 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -28,6 +28,7 @@ from nova import flags from nova import ipv6 from nova import utils from nova import log as logging +from nova.compute import vm_states from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import or_ @@ -35,6 +36,7 @@ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func +from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS @@ -1102,12 +1104,11 @@ def instance_destroy(context, instance_id): def instance_stop(context, instance_id): session = get_session() with session.begin(): - from nova.compute import power_state session.query(models.Instance).\ filter_by(id=instance_id).\ update({'host': None, - 'state': power_state.SHUTOFF, - 'state_description': 'stopped', + 'vm_state': vm_states.STOPPED, + 'task_state': None, 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ @@ -1250,12 +1251,17 @@ def instance_get_all_by_filters(context, filters): options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ - filter_by(deleted=can_read_deleted(context)) + order_by(desc(models.Instance.created_at)) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = filters.copy() + if 'changes-since' in filters: + changes_since = filters['changes-since'] + query_prefix = query_prefix.\ + filter(models.Instance.updated_at > changes_since) + if not context.is_admin: # If we're not admin context, add appropriate filter.. 
if context.project_id: @@ -1266,7 +1272,7 @@ def instance_get_all_by_filters(context, filters): # Filters for exact matches that we can do along with the SQL query... # For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', - 'state', 'instance_type_id', 'deleted'] + 'vm_state', 'instance_type_id', 'deleted'] query_filters = [key for key in filters.iterkeys() if key in exact_match_filter_names] @@ -1277,9 +1283,7 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.\ - filter_by(deleted=can_read_deleted(context)).\ - all() + instances = query_prefix.all() if not instances: return [] @@ -1306,21 +1310,40 @@ def instance_get_all_by_filters(context, filters): return instances +@require_context +def instance_get_active_by_window(context, begin, end=None, project_id=None): + """Return instances that were continuously active over window.""" + session = get_session() + query = session.query(models.Instance).\ + filter(models.Instance.launched_at < begin) + if end: + query = query.filter(or_(models.Instance.terminated_at == None, + models.Instance.terminated_at > end)) + else: + query = query.filter(models.Instance.terminated_at == None) + if project_id: + query = query.filter_by(project_id=project_id) + return query.all() + + @require_admin_context -def instance_get_active_by_window(context, begin, end=None): - """Return instances that were continuously active over the given window""" +def instance_get_active_by_window_joined(context, begin, end=None, + project_id=None): + """Return instances and joins that were continuously active over window.""" session = get_session() query = session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - 
options(joinedload('instance_type')).\ - filter(models.Instance.launched_at < begin) + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter(models.Instance.launched_at < begin) if end: query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > end)) else: query = query.filter(models.Instance.terminated_at == None) + if project_id: + query = query.filter_by(project_id=project_id) return query.all() @@ -1484,18 +1507,6 @@ def instance_get_floating_address(context, instance_id): return fixed_ip_refs[0].floating_ips[0]['address'] -@require_admin_context -def instance_set_state(context, instance_id, state, description=None): - # TODO(devcamcar): Move this out of models and into driver - from nova.compute import power_state - if not description: - description = power_state.name(state) - db.instance_update(context, - instance_id, - {'state': state, - 'state_description': description}) - - @require_context def instance_update(context, instance_id, values): session = get_session() @@ -3837,3 +3848,105 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, "deleted": 0}) spec_ref.save(session=session) return specs + + + #################### + + +@require_admin_context +def vsa_create(context, values): + """ + Creates Virtual Storage Array record. + """ + try: + vsa_ref = models.VirtualStorageArray() + vsa_ref.update(values) + vsa_ref.save() + except Exception, e: + raise exception.DBError(e) + return vsa_ref + + +@require_admin_context +def vsa_update(context, vsa_id, values): + """ + Updates Virtual Storage Array record. 
+ """ + session = get_session() + with session.begin(): + vsa_ref = vsa_get(context, vsa_id, session=session) + vsa_ref.update(values) + vsa_ref.save(session=session) + return vsa_ref + + +@require_admin_context +def vsa_destroy(context, vsa_id): + """ + Deletes Virtual Storage Array record. + """ + session = get_session() + with session.begin(): + session.query(models.VirtualStorageArray).\ + filter_by(id=vsa_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def vsa_get(context, vsa_id, session=None): + """ + Get Virtual Storage Array record by ID. + """ + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(id=vsa_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(project_id=context.project_id).\ + filter_by(id=vsa_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.VirtualStorageArrayNotFound(id=vsa_id) + + return result + + +@require_admin_context +def vsa_get_all(context): + """ + Get all Virtual Storage Array records. + """ + session = get_session() + return session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def vsa_get_all_by_project(context, project_id): + """ + Get all Virtual Storage Array records by project ID. 
+ """ + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + + #################### diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py new file mode 100644 index 000000000..844643704 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Text, Boolean, ForeignKey + +from nova import log as logging + +meta = MetaData() + +# +# New Tables +# + +virtual_storage_arrays = Table('virtual_storage_arrays', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type_id', Integer(), nullable=False), + Column('image_ref', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vc_count', Integer(), nullable=False), + Column('vol_count', Integer(), nullable=False), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + virtual_storage_arrays.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + virtual_storage_arrays.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py new file mode 100644 index 000000000..e58ae5362 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
import sqlalchemy
from sqlalchemy import MetaData, Table, Column, String

from nova.compute import task_states
from nova.compute import vm_states


meta = MetaData()


# New nullable column added on upgrade: holds the transient task state
# (scheduling, deleting, stopping, ...) that used to be folded into the
# single free-form state_description field.
c_task_state = Column('task_state',
                      String(length=255, convert_unicode=False,
                             assert_unicode=None, unicode_error=None,
                             _warn_on_bytestring=False),
                      nullable=True)


# Legacy state_description value -> the (vm_state, task_state) pair that
# replaces it when upgrading.  Keys are the old free-form state strings.
_upgrade_translations = {
    "stopping": {
        "state_description": vm_states.ACTIVE,
        "task_state": task_states.STOPPING,
    },
    "stopped": {
        "state_description": vm_states.STOPPED,
        "task_state": None,
    },
    "terminated": {
        "state_description": vm_states.DELETED,
        "task_state": None,
    },
    "terminating": {
        "state_description": vm_states.ACTIVE,
        "task_state": task_states.DELETING,
    },
    "running": {
        "state_description": vm_states.ACTIVE,
        "task_state": None,
    },
    "scheduling": {
        "state_description": vm_states.BUILDING,
        "task_state": task_states.SCHEDULING,
    },
    "migrating": {
        "state_description": vm_states.MIGRATING,
        "task_state": None,
    },
    "pending": {
        "state_description": vm_states.BUILDING,
        "task_state": task_states.SCHEDULING,
    },
}


# Inverse mapping used by downgrade(): vm_state -> {task_state: legacy
# state_description string}.  Note the translation is lossy both ways
# (e.g. REBUILDING and BUILDING both collapse to "pending").
_downgrade_translations = {
    vm_states.ACTIVE: {
        None: "running",
        task_states.DELETING: "terminating",
        task_states.STOPPING: "stopping",
    },
    vm_states.BUILDING: {
        None: "pending",
        task_states.SCHEDULING: "scheduling",
    },
    vm_states.STOPPED: {
        None: "stopped",
    },
    vm_states.REBUILDING: {
        None: "pending",
    },
    vm_states.DELETED: {
        None: "terminated",
    },
    vm_states.MIGRATING: {
        None: "migrating",
    },
}


def upgrade(migrate_engine):
    """Split the combined instance state into power/vm/task states.

    Renames 'state' -> 'power_state' and 'state_description' ->
    'vm_state', adds the nullable 'task_state' column, then rewrites
    existing rows according to _upgrade_translations.
    """
    meta.bind = migrate_engine

    instance_table = Table('instances', meta, autoload=True,
                           autoload_with=migrate_engine)

    c_state = instance_table.c.state
    c_state.alter(name='power_state')

    c_vm_state = instance_table.c.state_description
    c_vm_state.alter(name='vm_state')

    instance_table.create_column(c_task_state)

    for old_state, values in _upgrade_translations.iteritems():
        # NOTE(review): `values` keys use 'state_description' although the
        # column was just renamed to 'vm_state' above -- confirm this is
        # accepted by sqlalchemy-migrate / intended, rather than a latent
        # bug in the data-conversion step.
        instance_table.update().\
            values(**values).\
            where(c_vm_state == old_state).\
            execute()


def downgrade(migrate_engine):
    """Reverse upgrade(): restore the legacy single-state columns.

    Renames the columns back, folds (vm_state, task_state) pairs into
    legacy state_description values via _downgrade_translations, then
    drops task_state.
    """
    meta.bind = migrate_engine

    instance_table = Table('instances', meta, autoload=True,
                           autoload_with=migrate_engine)

    c_task_state = instance_table.c.task_state

    c_state = instance_table.c.power_state
    c_state.alter(name='state')

    c_vm_state = instance_table.c.vm_state
    c_vm_state.alter(name='state_description')

    for old_vm_state, old_task_states in _downgrade_translations.iteritems():
        for old_task_state, new_state_desc in old_task_states.iteritems():
            # NOTE(review): values(vm_state=...) targets the column that was
            # renamed to 'state_description' just above -- confirm this
            # works as intended (mirror of the concern in upgrade()).
            instance_table.update().\
                where(c_task_state == old_task_state).\
                where(c_vm_state == old_vm_state).\
                values(vm_state=new_state_desc).\
                execute()

    instance_table.drop_column('task_state')
class VirtualStorageArray(BASE, NovaBase):
    """
    Represents a virtual storage array supplying block storage to instances.
    """
    __tablename__ = 'virtual_storage_arrays'

    id = Column(Integer, primary_key=True, autoincrement=True)

    @property
    def name(self):
        # Generated name derived from the row id.  Assumes
        # FLAGS.vsa_name_template (e.g. 'vsa-%s') is defined elsewhere --
        # the flag is not visible in this file; TODO confirm.
        return FLAGS.vsa_name_template % self.id

    # User editable field for display in user-facing UIs
    display_name = Column(String(255))
    display_description = Column(String(255))

    project_id = Column(String(255))
    availability_zone = Column(String(255))

    # Instance type used when launching this VSA's VC instances.
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'))
    image_ref = Column(String(255))
    vc_count = Column(Integer, default=0)  # number of requested VC instances
    vol_count = Column(Integer, default=0)  # total number of BE volumes
    status = Column(String(255))
engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/exception.py b/nova/exception.py index 067639042..df6ff25cd 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -61,7 +61,7 @@ class ApiError(Error): super(ApiError, self).__init__(outstr) -class BuildInProgress(Error): +class RebuildRequiresActiveInstance(Error): pass @@ -146,6 +146,7 @@ class NovaException(Exception): message = _("An unknown exception occurred.") def __init__(self, **kwargs): + self.kwargs = kwargs try: self._error_string = self.message % kwargs @@ -533,6 +534,10 @@ class NoMoreFloatingIps(FloatingIpNotFound): message = _("Zero floating ips available.") +class FloatingIpAlreadyInUse(NovaException): + message = _("Floating ip %(address)s already in use by %(fixed_ip)s.") + + class NoFloatingIpsDefined(NotFound): message = _("Zero floating ips exist.") @@ -783,6 +788,18 @@ class PasteAppNotFound(NotFound): message = _("Could not load paste app '%(name)s' from %(path)s") +class VSANovaAccessParamNotFound(Invalid): + message = _("Nova access parameters were not specified.") + + +class VirtualStorageArrayNotFound(NotFound): + message = _("Virtual Storage Array %(id)d could not be found.") + + +class VirtualStorageArrayNotFoundByName(NotFound): + message = _("Virtual Storage Array %(name)s could not be found.") + + class CannotResizeToSameSize(NovaException): message = _("When resizing, instances must change size!") diff --git a/nova/flags.py b/nova/flags.py index 95000df1b..aa76defe5 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url', in the form "http://127.0.0.1:8000"') DEFINE_string('ajax_console_proxy_port', 8000, 'port that ajax_console_proxy binds') +DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') 
DEFINE_bool('fake_network', False, @@ -302,8 +303,12 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') -DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') -DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') +DEFINE_integer('rabbit_retry_interval', 1, + 'rabbit connection retry interval to start') +DEFINE_integer('rabbit_retry_backoff', 2, + 'rabbit connection retry backoff in seconds') +DEFINE_integer('rabbit_max_retries', 0, + 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], @@ -371,6 +376,17 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', 'Manager for volume') DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') +DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', + 'Manager for vsa') +DEFINE_string('vc_image_name', 'vc_image', + 'the VC image ID (for a VC image that exists in DB Glance)') +# VSA constants and enums +DEFINE_string('default_vsa_instance_type', 'm1.small', + 'default instance type for VSA instances') +DEFINE_integer('max_vcs_in_vsa', 32, + 'maxinum VCs in a VSA') +DEFINE_integer('vsa_part_size_gb', 100, + 'default partition size for shared capacity') # The service to use for image search and retrieval DEFINE_string('image_service', 'nova.image.glance.GlanceImageService', diff --git a/nova/image/glance.py b/nova/image/glance.py index 9060f6a91..80abc7384 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -141,19 +141,30 @@ class GlanceImageService(service.BaseImageService): """Paginate through results from glance server""" 
images = fetch_func(**kwargs) - for image in images: - yield image - else: + if not images: # break out of recursive loop to end pagination return + for image in images: + yield image + try: # attempt to advance the marker in order to fetch next page kwargs['marker'] = images[-1]['id'] except KeyError: raise exception.ImagePaginationFailed() - self._fetch_images(fetch_func, **kwargs) + try: + kwargs['limit'] = kwargs['limit'] - len(images) + # break if we have reached a provided limit + if kwargs['limit'] <= 0: + return + except KeyError: + # ignore missing limit, just proceed without it + pass + + for image in self._fetch_images(fetch_func, **kwargs): + yield image def show(self, context, image_id): """Returns a dict with image data for the given opaque image id.""" @@ -269,6 +280,20 @@ class GlanceImageService(service.BaseImageService): image_meta = _convert_from_string(image_meta) return image_meta + @staticmethod + def _is_image_available(context, image_meta): + """Check image availability. + + Under Glance, images are always available if the context has + an auth_token. Otherwise, we fall back to the superclass + method. 
+ + """ + if hasattr(context, 'auth_token') and context.auth_token: + return True + return service.BaseImageService._is_image_available(context, + image_meta) + # utility functions def _convert_timestamps_to_datetimes(image_meta): diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 27bb01988..8a08510ac 100644 --- a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -39,7 +39,8 @@ def to_global(prefix, mac, project_id): except TypeError: raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) except NameError: - raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id) + raise TypeError(_('Bad project_id for to_global_ipv6: %s') % + project_id) def to_mac(ipv6_address): diff --git a/nova/network/api.py b/nova/network/api.py index d04474df3..78580d360 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -111,6 +111,12 @@ class API(base.Base): '(%(project)s)') % {'address': floating_ip['address'], 'project': context.project_id}) + + # If this address has been previously associated to a + # different instance, disassociate the floating_ip + if floating_ip['fixed_ip'] and floating_ip['fixed_ip'] is not fixed_ip: + self.disassociate_floating_ip(context, floating_ip['address']) + # NOTE(vish): if we are multi_host, send to the instances host if fixed_ip['network']['multi_host']: host = fixed_ip['instance']['host'] diff --git a/nova/network/manager.py b/nova/network/manager.py index 404a3180e..e6b30d1a0 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -280,6 +280,13 @@ class FloatingIP(object): def associate_floating_ip(self, context, floating_address, fixed_address): """Associates an floating ip to a fixed ip.""" + floating_ip = self.db.floating_ip_get_by_address(context, + floating_address) + if floating_ip['fixed_ip']: + raise exception.FloatingIpAlreadyInUse( + address=floating_ip['address'], + fixed_ip=floating_ip['fixed_ip']['address']) + 
self.db.floating_ip_fixed_ip_associate(context, floating_address, fixed_address) @@ -484,17 +491,17 @@ class NetworkManager(manager.SchedulerDependentManager): # TODO(tr3buchet) eventually "enabled" should be determined def ip_dict(ip): return { - "ip": ip, - "netmask": network["netmask"], - "enabled": "1"} + 'ip': ip, + 'netmask': network['netmask'], + 'enabled': '1'} def ip6_dict(): return { - "ip": ipv6.to_global(network['cidr_v6'], + 'ip': ipv6.to_global(network['cidr_v6'], vif['address'], network['project_id']), - "netmask": network['netmask_v6'], - "enabled": "1"} + 'netmask': network['netmask_v6'], + 'enabled': '1'} network_dict = { 'bridge': network['bridge'], 'id': network['id'], diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 6ef4a050e..043838536 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -122,4 +122,5 @@ def notify(publisher_id, event_type, priority, payload): driver.notify(msg) except Exception, e: LOG.exception(_("Problem '%(e)s' attempting to " - "send to notification system." % locals())) + "send to notification system. 
Payload=%(payload)s" % + locals())) diff --git a/nova/quota.py b/nova/quota.py index 48e598659..771477747 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -116,8 +116,9 @@ def allowed_volumes(context, requested_volumes, size): allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) - allowed_volumes = min(allowed_volumes, - int(allowed_gigabytes // size)) + if size != 0: + allowed_volumes = min(allowed_volumes, + int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index bdf7f705b..c0cfdd5ce 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,44 +23,35 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.amqp', - "The messaging module to use, defaults to AMQP.") + 'nova.rpc.impl_kombu', + "The messaging module to use, defaults to kombu.") -RPCIMPL = import_object(FLAGS.rpc_backend) +_RPCIMPL = None -def create_connection(new=True): - return RPCIMPL.Connection.instance(new=True) - +def get_impl(): + """Delay import of rpc_backend until FLAGS are loaded.""" + global _RPCIMPL + if _RPCIMPL is None: + _RPCIMPL = import_object(FLAGS.rpc_backend) + return _RPCIMPL -def create_consumer(conn, topic, proxy, fanout=False): - if fanout: - return RPCIMPL.FanoutAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - else: - return RPCIMPL.TopicAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - -def create_consumer_set(conn, consumers): - return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) +def create_connection(new=True): + return get_impl().create_connection(new=new) def call(context, topic, msg): - return RPCIMPL.call(context, topic, msg) + return get_impl().call(context, topic, msg) def cast(context, topic, msg): - return RPCIMPL.cast(context, topic, msg) + return get_impl().cast(context, topic, msg) def fanout_cast(context, topic, msg): - return 
RPCIMPL.fanout_cast(context, topic, msg) + return get_impl().fanout_cast(context, topic, msg) def multicall(context, topic, msg): - return RPCIMPL.multicall(context, topic, msg) + return get_impl().multicall(context, topic, msg) diff --git a/nova/rpc/common.py b/nova/rpc/common.py index 1d3065a83..b8c280630 100644 --- a/nova/rpc/common.py +++ b/nova/rpc/common.py @@ -1,8 +1,14 @@ from nova import exception +from nova import flags from nova import log as logging LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') + class RemoteError(exception.Error): """Signifies that a remote class has raised an exception. diff --git a/nova/rpc/amqp.py b/nova/rpc/impl_carrot.py index fe429b266..303a4ff88 100644 --- a/nova/rpc/amqp.py +++ b/nova/rpc/impl_carrot.py @@ -33,6 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import pools from eventlet import queue @@ -42,21 +43,22 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags -from nova import log as logging -from nova import utils from nova.rpc.common import RemoteError, LOG +# Needed for tests +eventlet.monkey_patch() FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): """Connection instance object.""" + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + self._rpc_consumers = [] + self._rpc_consumer_thread = None + @classmethod def instance(cls, new=True): """Returns the instance.""" @@ -94,13 +96,63 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + def close(self): 
    def close(self):
        """Shut down: stop the consumer thread, close consumers, then
        close the underlying carrot broker connection."""
        self.cancel_consumer_thread()
        for consumer in self._rpc_consumers:
            try:
                consumer.close()
            except Exception:
                # ignore all errors
                pass
        self._rpc_consumers = []
        super(Connection, self).close()

    def consume_in_thread(self):
        """Consumer from all queues/consumers in a greenthread"""

        consumer_set = ConsumerSet(connection=self,
                                   consumer_list=self._rpc_consumers)

        def _consumer_thread():
            try:
                consumer_set.wait()
            except greenlet.GreenletExit:
                # NOTE(review): this requires `greenlet` to be imported at
                # module level; the visible import list of this module does
                # not show it -- confirm, otherwise this is a NameError.
                return
        # Idempotent: only one consumer greenthread is ever spawned.
        if self._rpc_consumer_thread is None:
            self._rpc_consumer_thread = eventlet.spawn(_consumer_thread)
        return self._rpc_consumer_thread

    def cancel_consumer_thread(self):
        """Cancel a consumer thread"""
        if self._rpc_consumer_thread is not None:
            self._rpc_consumer_thread.kill()
            try:
                # wait() re-raises the kill; GreenletExit means clean stop.
                self._rpc_consumer_thread.wait()
            except greenlet.GreenletExit:
                pass
            self._rpc_consumer_thread = None

    def create_consumer(self, topic, proxy, fanout=False):
        """Create a consumer that calls methods in the proxy"""
        if fanout:
            consumer = FanoutAdapterConsumer(
                    connection=self,
                    topic=topic,
                    proxy=proxy)
        else:
            consumer = TopicAdapterConsumer(
                    connection=self,
                    topic=topic,
                    proxy=proxy)
        # Tracked so close()/consume_in_thread() can manage them later.
        self._rpc_consumers.append(consumer)
if there is one + sleep_time += FLAGS.rabbit_retry_backoff + if sleep_time > 30: + sleep_time = 30 try: super(Consumer, self).__init__(*args, **kwargs) self.failed_connection = False break except Exception as e: # Catching all because carrot sucks + self.failed_connection = True + if max_retries > 0 and tries == max_retries: + break fl_host = FLAGS.rabbit_host fl_port = FLAGS.rabbit_port - fl_intv = FLAGS.rabbit_retry_interval + fl_intv = sleep_time LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' ' unreachable: %(e)s. Trying again in %(fl_intv)d' ' seconds.') % locals()) - self.failed_connection = True if self.failed_connection: LOG.error(_('Unable to connect to AMQP server ' - 'after %d tries. Shutting down.'), - FLAGS.rabbit_max_retries) + 'after %(tries)d tries. Shutting down.') % locals()) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -166,12 +227,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def attach_to_eventlet(self): - """Only needed for unit tests!""" - timer = utils.LoopingCall(self.fetch, enable_callbacks=True) - timer.start(0.1) - return timer - class AdapterConsumer(Consumer): """Calls methods on a proxy object based on method and args.""" @@ -242,7 +297,7 @@ class AdapterConsumer(Consumer): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: - logging.exception('Exception during message handling') + LOG.exception('Exception during message handling') if msg_id: msg_reply(msg_id, None, sys.exc_info()) return @@ -520,6 +575,11 @@ class MulticallWaiter(object): yield result +def create_connection(new=True): + """Create a connection""" + return Connection.instance(new=new) + + def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py new file mode 100644 index 
class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, channel, callback, tag, **kwargs):
        """Declare a queue on an amqp channel.

        'channel' is the amqp channel to use
        'callback' is the callback to call when messages are received
        'tag' is a unique ID for the consumer on the channel

        queue name, exchange name, and other kombu options are
        passed in here as a dictionary.
        """
        self.callback = callback
        self.tag = str(tag)
        self.kwargs = kwargs
        self.queue = None
        # reconnect() performs the initial declare as well as re-declares.
        self.reconnect(channel)

    def reconnect(self, channel):
        """Re-declare the queue after a rabbit reconnect"""
        self.channel = channel
        self.kwargs['channel'] = channel
        self.queue = kombu.entity.Queue(**self.kwargs)
        self.queue.declare()

    def consume(self, *args, **kwargs):
        """Actually declare the consumer on the amqp channel.  This will
        start the flow of messages from the queue.  Using the
        Connection.iterconsume() iterator will process the messages,
        calling the appropriate callback.

        If a callback is specified in kwargs, use that.  Otherwise,
        use the callback passed during __init__()

        If kwargs['nowait'] is True, then this call will block until
        a message is read.

        Messages will automatically be acked if the callback doesn't
        raise an exception
        """
        # NOTE(review): the 'nowait' sentence above reads inverted
        # (nowait=True would normally mean *not* blocking) -- confirm the
        # intended semantics against kombu's basic_consume.

        options = {'consumer_tag': self.tag}
        options['nowait'] = kwargs.get('nowait', False)
        callback = kwargs.get('callback', self.callback)
        if not callback:
            raise ValueError("No callback defined")

        def _callback(raw_message):
            # Deserialize, dispatch, then ack -- so the message is only
            # acked if the callback does not raise.
            message = self.channel.message_to_python(raw_message)
            callback(message.payload)
            message.ack()

        self.queue.consume(*args, callback=_callback, **options)

    def cancel(self):
        """Cancel the consuming from the queue, if it has started"""
        try:
            self.queue.cancel(self.tag)
        except KeyError, e:
            # NOTE(comstud): Kludge to get around a amqplib bug
            if str(e) != "u'%s'" % self.tag:
                raise
        self.queue = None
+ + 'channel' is the amqp channel to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=msg_id, + type='direct', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(DirectConsumer, self).__init__( + channel, + callback, + tag, + name=msg_id, + exchange=exchange, + routing_key=msg_id, + **options) + + +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'topic' queue. + + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': FLAGS.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=FLAGS.control_exchange, + type='topic', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(TopicConsumer, self).__init__( + channel, + callback, + tag, + name=topic, + exchange=exchange, + routing_key=topic, + **options) + + +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'fanout' queue. 
+ + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=exchange_name, + type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutConsumer, self).__init__( + channel, + callback, + tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, routing_key=self.routing_key) + + def send(self, msg): + """Send a message""" + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, channel, msg_id, **kwargs): + """init a 'direct' publisher. 
class TopicPublisher(Publisher):
    """Publisher class for 'topic'"""
    def __init__(self, channel, topic, **kwargs):
        """Init a 'topic' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        opts = {'durable': FLAGS.rabbit_durable_queues,
                'auto_delete': False,
                'exclusive': False}
        opts.update(kwargs)
        # Topic messages go through the shared control exchange, routed
        # by the topic name.
        super(TopicPublisher, self).__init__(
                channel, FLAGS.control_exchange, topic,
                type='topic', **opts)


class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'"""
    def __init__(self, channel, topic, **kwargs):
        """Init a 'fanout' publisher.

        Kombu options may be passed as keyword args to override defaults
        """
        opts = {'durable': False,
                'auto_delete': True,
                'exclusive': True}
        opts.update(kwargs)
        # Fanout exchanges are named '<topic>_fanout'; no routing key.
        super(FanoutPublisher, self).__init__(
                channel, '%s_fanout' % topic, None,
                type='fanout', **opts)
+ if self.max_retries <= 0: + self.max_retries = None + self.interval_start = FLAGS.rabbit_retry_interval + self.interval_stepping = FLAGS.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 + self.memory_transport = False + + self.params = dict(hostname=FLAGS.rabbit_host, + port=FLAGS.rabbit_port, + userid=FLAGS.rabbit_userid, + password=FLAGS.rabbit_password, + virtual_host=FLAGS.rabbit_virtual_host) + if FLAGS.fake_rabbit: + self.params['transport'] = 'memory' + self.memory_transport = True + else: + self.memory_transport = False + self.connection = None + self.reconnect() + + def reconnect(self): + """Handles reconnecting and re-estblishing queues""" + if self.connection: + try: + self.connection.close() + except self.connection.connection_errors: + pass + time.sleep(1) + self.connection = kombu.connection.BrokerConnection(**self.params) + if self.memory_transport: + # Kludge to speed up tests. + self.connection.transport.polling_interval = 0.0 + self.consumer_num = itertools.count(1) + + try: + self.connection.ensure_connection(errback=self.connect_error, + max_retries=self.max_retries, + interval_start=self.interval_start, + interval_step=self.interval_stepping, + interval_max=self.interval_max) + except self.connection.connection_errors, e: + # We should only get here if max_retries is set. We'll go + # ahead and exit in this case. 
+ err_str = str(e) + max_retries = self.max_retries + LOG.error(_('Unable to connect to AMQP server ' + 'after %(max_retries)d tries: %(err_str)s') % locals()) + sys.exit(1) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % + self.params)) + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + for consumer in self.consumers: + consumer.reconnect(self.channel) + if self.consumers: + LOG.debug(_("Re-established AMQP queues")) + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def connect_error(self, exc, interval): + """Callback when there are connection re-tries by kombu""" + info = self.params.copy() + info['intv'] = interval + info['e'] = exc + LOG.error(_('AMQP server on %(hostname)s:%(port)d is' + ' unreachable: %(e)s. Trying again in %(intv)d' + ' seconds.') % info) + + def close(self): + """Close/release this connection""" + self.cancel_consumer_thread() + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.cancel_consumer_thread() + self.channel.close() + self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') + self.consumers = [] + + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers + """ + consumer = consumer_cls(self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer + + def iterconsume(self, limit=None): + """Return an iterator that will consume from all queues/consumers""" + while True: + try: + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] + for queue in queues_head: + queue.consume(nowait=True) + 
queues_tail.consume(nowait=False) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.connection.drain_events() + except self.connection.connection_errors, e: + LOG.exception(_('Failed to consume message from queue: ' + '%s' % str(e))) + self.reconnect() + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread is not None: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + while True: + publisher = None + try: + publisher = cls(self.channel, topic) + publisher.send(msg) + return + except self.connection.connection_errors, e: + LOG.exception(_('Failed to publish message %s' % str(e))) + try: + self.reconnect() + if publisher: + publisher.reconnect(self.channel) + except self.connection.connection_errors, e: + pass + + def declare_direct_consumer(self, topic, callback): + """Create a 'direct' queue. 
+        In nova's use, this is generally a msg_id queue used for
+        responses for call/multicall
+        """
+        self.declare_consumer(DirectConsumer, topic, callback)
+
+    def declare_topic_consumer(self, topic, callback=None):
+        """Create a 'topic' consumer."""
+        self.declare_consumer(TopicConsumer, topic, callback)
+
+    def declare_fanout_consumer(self, topic, callback):
+        """Create a 'fanout' consumer"""
+        self.declare_consumer(FanoutConsumer, topic, callback)
+
+    def direct_send(self, msg_id, msg):
+        """Send a 'direct' message"""
+        self.publisher_send(DirectPublisher, msg_id, msg)
+
+    def topic_send(self, topic, msg):
+        """Send a 'topic' message"""
+        self.publisher_send(TopicPublisher, topic, msg)
+
+    def fanout_send(self, topic, msg):
+        """Send a 'fanout' message"""
+        self.publisher_send(FanoutPublisher, topic, msg)
+
+    def consume(self, limit=None):
+        """Consume from all queues/consumers"""
+        it = self.iterconsume(limit=limit)
+        while True:
+            try:
+                it.next()
+            except StopIteration:
+                return
+
+    def consume_in_thread(self):
+        """Consume from all queues/consumers in a greenthread"""
+        def _consumer_thread():
+            try:
+                self.consume()
+            except greenlet.GreenletExit:
+                return
+        if self.consumer_thread is None:
+            self.consumer_thread = eventlet.spawn(_consumer_thread)
+        return self.consumer_thread
+
+    def create_consumer(self, topic, proxy, fanout=False):
+        """Create a consumer that calls a method in a proxy object"""
+        if fanout:
+            self.declare_fanout_consumer(topic, ProxyCallback(proxy))
+        else:
+            self.declare_topic_consumer(topic, ProxyCallback(proxy))
+
+
+class Pool(pools.Pool):
+    """Class that implements a Pool of Connections."""
+
+    # TODO(comstud): Timeout connections not used in a while
+    def create(self):
+        LOG.debug('Pool creating new connection')
+        return Connection()
+
+# Create a ConnectionPool to use for RPC calls.
We'll order the
+# pool as a stack (LIFO), so that we can potentially loop through and
+# timeout old unused connections at some point
+ConnectionPool = Pool(
+        max_size=FLAGS.rpc_conn_pool_size,
+        order_as_stack=True)
+
+
+class ConnectionContext(object):
+    """The class that is actually returned to the caller of
+    create_connection(). This is essentially a wrapper around
+    Connection that supports 'with' and can return a new Connection or
+    one from a pool. It will also catch when an instance of this class
+    is to be deleted so that we can return Connections to the pool on
+    exceptions and so forth without making the caller be responsible for
+    catching all exceptions and making sure to return a connection to
+    the pool.
+    """
+
+    def __init__(self, pooled=True):
+        """Create a new connection, or get one from the pool"""
+        self.connection = None
+        if pooled:
+            self.connection = ConnectionPool.get()
+        else:
+            self.connection = Connection()
+        self.pooled = pooled
+
+    def __enter__(self):
+        """with ConnectionContext() should return self"""
+        return self
+
+    def _done(self):
+        """If the connection came from a pool, clean it up and put it back.
+        If it did not come from a pool, close it.
+        """
+        if self.connection:
+            if self.pooled:
+                # Reset the connection so it's ready for the next caller
+                # to grab from the pool
+                self.connection.reset()
+                ConnectionPool.put(self.connection)
+            else:
+                try:
+                    self.connection.close()
+                except Exception:
+                    # There's apparently a bug in kombu 'memory' transport
+                    # which causes an assert failure.
+                    # But, we probably want to ignore all exceptions when
+                    # trying to close a connection, anyway...
+                    pass
+            self.connection = None
+
+    def __exit__(self, t, v, tb):
+        """end of 'with' statement. We're done here."""
+        self._done()
+
+    def __del__(self):
+        """Caller is done with this connection.
Make sure we cleaned up."""
+        self._done()
+
+    def close(self):
+        """Caller is done with this connection."""
+        self._done()
+
+    def __getattr__(self, key):
+        """Proxy all other calls to the Connection instance"""
+        if self.connection:
+            return getattr(self.connection, key)
+        else:
+            raise exception.InvalidRPCConnectionReuse()
+
+
+class ProxyCallback(object):
+    """Calls methods on a proxy object based on method and args."""
+
+    def __init__(self, proxy):
+        self.proxy = proxy
+        self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
+
+    def __call__(self, message_data):
+        """Consumer callback to call a method on a proxy object.
+
+        Parses the message for validity and fires off a thread to call the
+        proxy object method.
+
+        Message data should be a dictionary with two keys:
+            method: string representing the method to call
+            args: dictionary of arg: value
+
+        Example: {'method': 'echo', 'args': {'value': 42}}
+
+        """
+        LOG.debug(_('received %s') % message_data)
+        ctxt = _unpack_context(message_data)
+        method = message_data.get('method')
+        args = message_data.get('args', {})
+        if not method:
+            LOG.warn(_('no method for message: %s') % message_data)
+            ctxt.reply(_('No method for message: %s') % message_data)
+            return
+        self.pool.spawn_n(self._process_data, ctxt, method, args)
+
+    @exception.wrap_exception()
+    def _process_data(self, ctxt, method, args):
+        """Thread that magically looks for a method on the proxy
+        object and calls it.
+        """
+
+        node_func = getattr(self.proxy, str(method))
+        node_args = dict((str(k), v) for k, v in args.iteritems())
+        # NOTE(vish): magic is fun!
+        try:
+            rval = node_func(context=ctxt, **node_args)
+            # Check if the result was a generator
+            if isinstance(rval, types.GeneratorType):
+                for x in rval:
+                    ctxt.reply(x, None)
+            else:
+                ctxt.reply(rval, None)
+            # This final None tells multicall that it is done.
+ ctxt.reply(None, None) + except Exception as e: + LOG.exception('Exception during message handling') + ctxt.reply(None, sys.exc_info()) + return + + +def _unpack_context(msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs. + key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + LOG.debug(_('unpacked context: %s'), context_dict) + return RpcContext.from_dict(context_dict) + + +def _pack_context(msg, context): + """Pack context into msg. + + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. + + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class RpcContext(context.RequestContext): + """Context that supports replying to a rpc.call""" + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + if self.msg_id: + msg_reply(self.msg_id, *args, **kwargs) + + +class MulticallWaiter(object): + def __init__(self, connection): + self._connection = connection + self._iterator = connection.iterconsume() + self._result = None + self._done = False + + def done(self): + self._done = True + self._connection.close() + + def __call__(self, data): + """The consume() callback will call this. 
Store the result.""" + if data['failure']: + self._result = RemoteError(*data['failure']) + else: + self._result = data['result'] + + def __iter__(self): + """Return a result until we get a 'None' response from consumer""" + if self._done: + raise StopIteration + while True: + self._iterator.next() + result = self._result + if isinstance(result, Exception): + self.done() + raise result + if result == None: + self.done() + raise StopIteration + yield result + + +def create_connection(new=True): + """Create a connection""" + return ConnectionContext(pooled=not new) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" + # Can't use 'with' for multicall, as it returns an iterator + # that will continue to use the connection. When it's done, + # connection.close() will get called which will put it back into + # the pool + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + _pack_context(msg, context) + + conn = ConnectionContext() + wait_msg = MulticallWaiter(conn) + conn.declare_direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, msg) + + return wait_msg + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.topic_send(topic, msg) + + +def fanout_cast(context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.fanout_send(topic, msg) + 
+ +def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. + + """ + with ConnectionContext() as conn: + if failure: + message = str(failure[1]) + tb = traceback.format_exception(*failure) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) + failure = (failure[0].__name__, str(failure[1]), tb) + + try: + msg = {'result': reply, 'failure': failure} + except TypeError: + msg = {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure} + conn.direct_send(msg_id, msg) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index e8c343a4b..7f17b642f 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -180,7 +180,6 @@ class AbstractScheduler(driver.Scheduler): for zone_id, result in child_results: if not result: continue - assert isinstance(zone_id, int) for zone_rec in zones: if zone_rec['id'] != zone_id: diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f28353f05..22f4e14f9 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -30,6 +30,7 @@ from nova import log as logging from nova import rpc from nova import utils from nova.compute import power_state +from nova.compute import vm_states from nova.api.ec2 import ec2utils @@ -104,10 +105,8 @@ class Scheduler(object): dest, block_migration) # Changing instance_state. - db.instance_set_state(context, - instance_id, - power_state.PAUSED, - 'migrating') + values = {"vm_state": vm_states.MIGRATING} + db.instance_update(context, instance_id, values) # Changing volume state for volume_ref in instance_ref['volumes']: @@ -129,8 +128,7 @@ class Scheduler(object): """ # Checking instance is running. 
- if (power_state.RUNNING != instance_ref['state'] or \ - 'running' != instance_ref['state_description']): + if instance_ref['power_state'] != power_state.RUNNING: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) raise exception.InstanceNotRunning(instance_id=instance_id) diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py new file mode 100644 index 000000000..6962dd86b --- /dev/null +++ b/nova/scheduler/vsa.py @@ -0,0 +1,535 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +VSA Simple Scheduler +""" + +from nova import context +from nova import db +from nova import flags +from nova import log as logging +from nova import rpc +from nova import utils +from nova.scheduler import driver +from nova.scheduler import simple +from nova.vsa.api import VsaState +from nova.volume import volume_types + +LOG = logging.getLogger('nova.scheduler.vsa') + +FLAGS = flags.FLAGS +flags.DEFINE_integer('drive_type_approx_capacity_percent', 10, + 'The percentage range for capacity comparison') +flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, + 'The number of unique hosts per storage allocation') +flags.DEFINE_boolean('vsa_select_unique_drives', True, + 'Allow selection of same host for multiple drives') + + +def BYTES_TO_GB(bytes): + return bytes >> 30 + + +def GB_TO_BYTES(gb): + return gb << 30 + + +class VsaScheduler(simple.SimpleScheduler): + """Implements Scheduler for volume placement.""" + + def __init__(self, *args, **kwargs): + super(VsaScheduler, self).__init__(*args, **kwargs) + self._notify_all_volume_hosts("startup") + + def _notify_all_volume_hosts(self, event): + rpc.fanout_cast(context.get_admin_context(), + FLAGS.volume_topic, + {"method": "notification", + "args": {"event": event}}) + + def _qosgrp_match(self, drive_type, qos_values): + + def _compare_names(str1, str2): + return str1.lower() == str2.lower() + + def _compare_sizes_approxim(cap_capacity, size): + cap_capacity = BYTES_TO_GB(int(cap_capacity)) + size = int(size) + size_perc = size * \ + FLAGS.drive_type_approx_capacity_percent / 100 + + return cap_capacity >= size - size_perc and \ + cap_capacity <= size + size_perc + + # Add more entries for additional comparisons + compare_list = [{'cap1': 'DriveType', + 'cap2': 'type', + 'cmp_func': _compare_names}, + {'cap1': 'DriveCapacity', + 'cap2': 'size', + 'cmp_func': _compare_sizes_approxim}] + + for cap in compare_list: + if cap['cap1'] in qos_values.keys() and \ + cap['cap2'] in drive_type.keys() and \ + 
cap['cmp_func'] is not None and \ + cap['cmp_func'](qos_values[cap['cap1']], + drive_type[cap['cap2']]): + pass + else: + return False + return True + + def _get_service_states(self): + return self.zone_manager.service_states + + def _filter_hosts(self, topic, request_spec, host_list=None): + + LOG.debug(_("_filter_hosts: %(request_spec)s"), locals()) + + drive_type = request_spec['drive_type'] + LOG.debug(_("Filter hosts for drive type %s"), drive_type['name']) + + if host_list is None: + host_list = self._get_service_states().iteritems() + + filtered_hosts = [] # returns list of (hostname, capability_dict) + for host, host_dict in host_list: + for service_name, service_dict in host_dict.iteritems(): + if service_name != topic: + continue + + gos_info = service_dict.get('drive_qos_info', {}) + for qosgrp, qos_values in gos_info.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + if qos_values['AvailableCapacity'] > 0: + filtered_hosts.append((host, gos_info)) + else: + LOG.debug(_("Host %s has no free capacity. 
Skip"), + host) + break + + host_names = [item[0] for item in filtered_hosts] + LOG.debug(_("Filter hosts: %s"), host_names) + return filtered_hosts + + def _allowed_to_use_host(self, host, selected_hosts, unique): + if unique == False or \ + host not in [item[0] for item in selected_hosts]: + return True + else: + return False + + def _add_hostcap_to_list(self, selected_hosts, host, cap): + if host not in [item[0] for item in selected_hosts]: + selected_hosts.append((host, cap)) + + def host_selection_algorithm(self, request_spec, all_hosts, + selected_hosts, unique): + """Must override this method for VSA scheduler to work.""" + raise NotImplementedError(_("Must implement host selection mechanism")) + + def _select_hosts(self, request_spec, all_hosts, selected_hosts=None): + + if selected_hosts is None: + selected_hosts = [] + + host = None + if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc: + # try to select from already selected hosts only + LOG.debug(_("Maximum number of hosts selected (%d)"), + len(selected_hosts)) + unique = False + (host, qos_cap) = self.host_selection_algorithm(request_spec, + selected_hosts, + selected_hosts, + unique) + + LOG.debug(_("Selected excessive host %(host)s"), locals()) + else: + unique = FLAGS.vsa_select_unique_drives + + if host is None: + # if we've not tried yet (# of sel hosts < max) - unique=True + # or failed to select from selected_hosts - unique=False + # select from all hosts + (host, qos_cap) = self.host_selection_algorithm(request_spec, + all_hosts, + selected_hosts, + unique) + if host is None: + raise driver.WillNotSchedule(_("No available hosts")) + + return (host, qos_cap) + + def _provision_volume(self, context, vol, vsa_id, availability_zone): + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + now = utils.utcnow() + options = { + 'size': vol['size'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'snapshot_id': None, + 'availability_zone': 
availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': vol['name'], + 'display_description': vol['description'], + 'volume_type_id': vol['volume_type_id'], + 'metadata': dict(to_vsa_id=vsa_id), + 'host': vol['host'], + 'scheduled_at': now + } + + size = vol['size'] + host = vol['host'] + name = vol['name'] + LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\ + "host %(host)s"), locals()) + + volume_ref = db.volume_create(context, options) + rpc.cast(context, + db.queue_get_for(context, "volume", vol['host']), + {"method": "create_volume", + "args": {"volume_id": volume_ref['id'], + "snapshot_id": None}}) + + def _check_host_enforcement(self, context, availability_zone): + if (availability_zone + and ':' in availability_zone + and context.is_admin): + zone, _x, host = availability_zone.partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-volume') + if not self.service_is_up(service): + raise driver.WillNotSchedule(_("Host %s not available") % host) + + return host + else: + return None + + def _assign_hosts_to_volumes(self, context, volume_params, forced_host): + + prev_volume_type_id = None + request_spec = {} + selected_hosts = [] + + LOG.debug(_("volume_params %(volume_params)s") % locals()) + + i = 1 + for vol in volume_params: + name = vol['name'] + LOG.debug(_("%(i)d: Volume %(name)s"), locals()) + i += 1 + + if forced_host: + vol['host'] = forced_host + vol['capabilities'] = None + continue + + volume_type_id = vol['volume_type_id'] + request_spec['size'] = vol['size'] + + if prev_volume_type_id is None or\ + prev_volume_type_id != volume_type_id: + # generate list of hosts for this drive type + + volume_type = volume_types.get_volume_type(context, + volume_type_id) + drive_type = { + 'name': volume_type['extra_specs'].get('drive_name'), + 'type': volume_type['extra_specs'].get('drive_type'), + 'size': int(volume_type['extra_specs'].get('drive_size')), + 'rpm': 
volume_type['extra_specs'].get('drive_rpm'), + } + request_spec['drive_type'] = drive_type + + all_hosts = self._filter_hosts("volume", request_spec) + prev_volume_type_id = volume_type_id + + (host, qos_cap) = self._select_hosts(request_spec, + all_hosts, selected_hosts) + vol['host'] = host + vol['capabilities'] = qos_cap + self._consume_resource(qos_cap, vol['size'], -1) + + def schedule_create_volumes(self, context, request_spec, + availability_zone=None, *_args, **_kwargs): + """Picks hosts for hosting multiple volumes.""" + + num_volumes = request_spec.get('num_volumes') + LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") % + locals()) + + vsa_id = request_spec.get('vsa_id') + volume_params = request_spec.get('volumes') + + host = self._check_host_enforcement(context, availability_zone) + + try: + self._print_capabilities_info() + + self._assign_hosts_to_volumes(context, volume_params, host) + + for vol in volume_params: + self._provision_volume(context, vol, vsa_id, availability_zone) + except: + if vsa_id: + db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED)) + + for vol in volume_params: + if 'capabilities' in vol: + self._consume_resource(vol['capabilities'], + vol['size'], 1) + raise + + return None + + def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): + """Picks the best host based on requested drive type capability.""" + volume_ref = db.volume_get(context, volume_id) + + host = self._check_host_enforcement(context, + volume_ref['availability_zone']) + if host: + now = utils.utcnow() + db.volume_update(context, volume_id, {'host': host, + 'scheduled_at': now}) + return host + + volume_type_id = volume_ref['volume_type_id'] + if volume_type_id: + volume_type = volume_types.get_volume_type(context, volume_type_id) + + if volume_type_id is None or\ + volume_types.is_vsa_volume(volume_type_id, volume_type): + + LOG.debug(_("Non-VSA volume %d"), volume_ref['id']) + return super(VsaScheduler, 
self).schedule_create_volume(context, + volume_id, *_args, **_kwargs) + + self._print_capabilities_info() + + drive_type = { + 'name': volume_type['extra_specs'].get('drive_name'), + 'type': volume_type['extra_specs'].get('drive_type'), + 'size': int(volume_type['extra_specs'].get('drive_size')), + 'rpm': volume_type['extra_specs'].get('drive_rpm'), + } + + LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ + "%(drive_type)s"), locals()) + + request_spec = {'size': volume_ref['size'], + 'drive_type': drive_type} + hosts = self._filter_hosts("volume", request_spec) + + try: + (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts) + except: + if volume_ref['to_vsa_id']: + db.vsa_update(context, volume_ref['to_vsa_id'], + dict(status=VsaState.FAILED)) + raise + + if host: + now = utils.utcnow() + db.volume_update(context, volume_id, {'host': host, + 'scheduled_at': now}) + self._consume_resource(qos_cap, volume_ref['size'], -1) + return host + + def _consume_full_drive(self, qos_values, direction): + qos_values['FullDrive']['NumFreeDrives'] += direction + qos_values['FullDrive']['NumOccupiedDrives'] -= direction + + def _consume_partition(self, qos_values, size, direction): + + if qos_values['PartitionDrive']['PartitionSize'] != 0: + partition_size = qos_values['PartitionDrive']['PartitionSize'] + else: + partition_size = size + part_per_drive = qos_values['DriveCapacity'] / partition_size + + if direction == -1 and \ + qos_values['PartitionDrive']['NumFreePartitions'] == 0: + + self._consume_full_drive(qos_values, direction) + qos_values['PartitionDrive']['NumFreePartitions'] += \ + part_per_drive + + qos_values['PartitionDrive']['NumFreePartitions'] += direction + qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction + + if direction == 1 and \ + qos_values['PartitionDrive']['NumFreePartitions'] >= \ + part_per_drive: + + self._consume_full_drive(qos_values, direction) + qos_values['PartitionDrive']['NumFreePartitions'] -= \ + 
part_per_drive + + def _consume_resource(self, qos_values, size, direction): + if qos_values is None: + LOG.debug(_("No capability selected for volume of size %(size)s"), + locals()) + return + + if size == 0: # full drive match + qos_values['AvailableCapacity'] += direction * \ + qos_values['DriveCapacity'] + self._consume_full_drive(qos_values, direction) + else: + qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size) + self._consume_partition(qos_values, GB_TO_BYTES(size), direction) + return + + def _print_capabilities_info(self): + host_list = self._get_service_states().iteritems() + for host, host_dict in host_list: + for service_name, service_dict in host_dict.iteritems(): + if service_name != "volume": + continue + + LOG.info(_("Host %s:"), host) + + gos_info = service_dict.get('drive_qos_info', {}) + for qosgrp, qos_values in gos_info.iteritems(): + total = qos_values['TotalDrives'] + used = qos_values['FullDrive']['NumOccupiedDrives'] + free = qos_values['FullDrive']['NumFreeDrives'] + avail = BYTES_TO_GB(qos_values['AvailableCapacity']) + + LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "\ + "used %(used)2s, free %(free)2s. Available "\ + "capacity %(avail)-5s"), locals()) + + +class VsaSchedulerLeastUsedHost(VsaScheduler): + """ + Implements VSA scheduler to select the host with least used capacity + of particular type. 
+ """ + + def __init__(self, *args, **kwargs): + super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs) + + def host_selection_algorithm(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + min_used = 0 + + for (host, capabilities) in all_hosts: + + has_enough_capacity = False + used_capacity = 0 + for qosgrp, qos_values in capabilities.iteritems(): + + used_capacity = used_capacity + qos_values['TotalCapacity'] \ + - qos_values['AvailableCapacity'] + + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + if qos_values['FullDrive']['NumFreeDrives'] > 0: + has_enough_capacity = True + matched_qos = qos_values + else: + break + else: + if qos_values['AvailableCapacity'] >= size and \ + (qos_values['PartitionDrive'][ + 'NumFreePartitions'] > 0 or \ + qos_values['FullDrive']['NumFreeDrives'] > 0): + has_enough_capacity = True + matched_qos = qos_values + else: + break + + if has_enough_capacity and \ + self._allowed_to_use_host(host, + selected_hosts, + unique) and \ + (best_host is None or used_capacity < min_used): + + min_used = used_capacity + best_host = host + best_qoscap = matched_qos + best_cap = capabilities + + if best_host: + self._add_hostcap_to_list(selected_hosts, best_host, best_cap) + min_used = BYTES_TO_GB(min_used) + LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "\ + "(used capacity %(min_used)s)"), locals()) + return (best_host, best_qoscap) + + +class VsaSchedulerMostAvailCapacity(VsaScheduler): + """ + Implements VSA scheduler to select the host with most available capacity + of one particular type. 
+ """ + + def __init__(self, *args, **kwargs): + super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs) + + def host_selection_algorithm(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + max_avail = 0 + + for (host, capabilities) in all_hosts: + for qosgrp, qos_values in capabilities.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + available = qos_values['FullDrive']['NumFreeDrives'] + else: + available = qos_values['AvailableCapacity'] + + if available > max_avail and \ + self._allowed_to_use_host(host, + selected_hosts, + unique): + max_avail = available + best_host = host + best_qoscap = qos_values + best_cap = capabilities + break # go to the next host + + if best_host: + self._add_hostcap_to_list(selected_hosts, best_host, best_cap) + type_str = "drives" if size == 0 else "bytes" + LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. 
"\ + "(available %(max_avail)s %(type_str)s)"), locals()) + + return (best_host, best_qoscap) diff --git a/nova/service.py b/nova/service.py index 959e79052..247eb4fb1 100644 --- a/nova/service.py +++ b/nova/service.py @@ -153,26 +153,15 @@ class Service(object): self.topic) # Share this same connection for these Consumers - consumer_all = rpc.create_consumer(self.conn, self.topic, self, - fanout=False) + self.conn.create_consumer(self.topic, self, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - consumer_node = rpc.create_consumer(self.conn, node_topic, self, - fanout=False) + self.conn.create_consumer(node_topic, self, fanout=False) - fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True) + self.conn.create_consumer(self.topic, self, fanout=True) - consumers = [consumer_all, consumer_node, fanout] - consumer_set = rpc.create_consumer_set(self.conn, consumers) - - # Wait forever, processing these consumers - def _wait(): - try: - consumer_set.wait() - finally: - consumer_set.close() - - self.consumer_set_thread = eventlet.spawn(_wait) + # Consume from all consumers in a thread + self.conn.consume_in_thread() if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -237,10 +226,11 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): - self.consumer_set_thread.kill() + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway try: - self.consumer_set_thread.wait() - except greenlet.GreenletExit: + self.conn.close() + except Exception: pass for x in self.timers: try: diff --git a/nova/test.py b/nova/test.py index 88f1489e8..d759aef60 100644 --- a/nova/test.py +++ b/nova/test.py @@ -277,3 +277,21 @@ class TestCase(unittest.TestCase): continue else: self.assertEqual(sub_value, super_value) + + def assertIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. 
Assert 'a' in 'b'""" + try: + f = super(TestCase, self).assertIn + except AttributeError: + self.assertTrue(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) + + def assertNotIn(self, a, b, *args, **kwargs): + """Python < v2.7 compatibility. Assert 'a' NOT in 'b'""" + try: + f = super(TestCase, self).assertNotIn + except AttributeError: + self.assertFalse(a in b, *args, **kwargs) + else: + f(a, b, *args, **kwargs) diff --git a/nova/tests/api/ec2/__init__.py b/nova/tests/api/ec2/__init__.py new file mode 100644 index 000000000..6dab802f2 --- /dev/null +++ b/nova/tests/api/ec2/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Openstack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/test_middleware.py b/nova/tests/api/ec2/test_middleware.py index 40d117c45..295f6c4ea 100644 --- a/nova/tests/test_middleware.py +++ b/nova/tests/api/ec2/test_middleware.py @@ -21,10 +21,13 @@ import webob.dec import webob.exc from nova.api import ec2 +from nova import context +from nova import exception from nova import flags from nova import test from nova import utils +from xml.etree.ElementTree import fromstring as xml_to_tree FLAGS = flags.FLAGS @@ -83,3 +86,45 @@ class LockoutTestCase(test.TestCase): utils.advance_time_seconds(FLAGS.lockout_window * 60) self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) self.assertFalse(self._is_locked_out('test')) + + +class ExecutorTestCase(test.TestCase): + def setUp(self): + super(ExecutorTestCase, self).setUp() + self.executor = ec2.Executor() + + def _execute(self, invoke): + class Fake(object): + pass + fake_ec2_request = Fake() + fake_ec2_request.invoke = invoke + + fake_wsgi_request = Fake() + + fake_wsgi_request.environ = { + 'nova.context': context.get_admin_context(), + 'ec2.request': fake_ec2_request, + } + return self.executor(fake_wsgi_request) + + def _extract_message(self, result): + tree = xml_to_tree(result.body) + return tree.findall('./Errors')[0].find('Error/Message').text + + def test_instance_not_found(self): + def not_found(context): + raise exception.InstanceNotFound(instance_id=5) + result = self._execute(not_found) + self.assertIn('i-00000005', self._extract_message(result)) + + def test_snapshot_not_found(self): + def not_found(context): + raise exception.SnapshotNotFound(snapshot_id=5) + result = self._execute(not_found) + self.assertIn('snap-00000005', self._extract_message(result)) + + def test_volume_not_found(self): + def not_found(context): + raise exception.VolumeNotFound(volume_id=5) + result = self._execute(not_found) + self.assertIn('vol-00000005', 
self._extract_message(result)) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..089c8e59d 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -23,6 +23,7 @@ from xml.dom import minidom import stubout import webob +from nova import db from nova import exception from nova import flags from nova import test @@ -76,6 +77,8 @@ class CreateserverextTest(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.user_data = None + self.db = db def create(self, *args, **kwargs): if 'injected_files' in kwargs: @@ -87,6 +90,10 @@ class CreateserverextTest(test.TestCase): self.networks = kwargs['requested_networks'] else: self.networks = None + + if 'user_data' in kwargs: + self.user_data = kwargs['user_data'] + return [{'id': '1234', 'display_name': 'fakeinstance', 'uuid': FAKE_UUID, 'created_at': "", @@ -119,6 +126,14 @@ class CreateserverextTest(test.TestCase): server['networks'] = network_list return {'server': server} + def _create_user_data_request_dict(self, user_data): + server = {} + server['name'] = 'new-server-test' + server['imageRef'] = 1 + server['flavorRef'] = 1 + server['user_data'] = user_data + return {'server': server} + def _get_create_request_json(self, body_dict): req = webob.Request.blank('/v1.1/123/os-create-server-ext') req.headers['Content-Type'] = 'application/json' @@ -178,6 +193,13 @@ class CreateserverextTest(test.TestCase): self._run_create_instance_with_mock_compute_api(request) return request, response, compute_api.networks + def _create_instance_with_user_data_json(self, networks): + body_dict = self._create_user_data_request_dict(networks) + request = self._get_create_request_json(body_dict) + compute_api, response = \ + self._run_create_instance_with_mock_compute_api(request) + return request, response, compute_api.user_data + def 
_create_instance_with_networks_xml(self, networks): body_dict = self._create_networks_request_dict(networks) request = self._get_create_request_xml(body_dict) @@ -304,3 +326,25 @@ class CreateserverextTest(test.TestCase): self.assertEquals(response.status_int, 202) self.assertEquals(compute_api.networks, [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]) + + def test_create_instance_with_userdata(self): + user_data_contents = '#!/bin/bash\necho "Oh no!"\n' + user_data_contents = base64.b64encode(user_data_contents) + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + self.assertEquals(response.status_int, 202) + self.assertEquals(user_data, user_data_contents) + + def test_create_instance_with_userdata_none(self): + user_data_contents = None + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + self.assertEquals(response.status_int, 202) + self.assertEquals(user_data, user_data_contents) + + def test_create_instance_with_userdata_with_non_b64_content(self): + user_data_contents = '#!/bin/bash\necho "Oh no!"\n' + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + self.assertEquals(response.status_int, 400) + self.assertEquals(user_data, None) diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 568faf867..642f2b841 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -20,9 +20,11 @@ import webob from nova import compute from nova import context from nova import db -from nova import test from nova import network +from nova import rpc +from nova import test from nova.tests.api.openstack import fakes +from nova.tests.api.openstack import test_servers from nova.api.openstack.contrib.floating_ips import FloatingIPController @@ -36,14 +38,13 @@ def network_api_get_floating_ip(self, 
context, id): def network_api_get_floating_ip_by_ip(self, context, address): return {'id': 1, 'address': '10.10.10.10', - 'fixed_ip': {'address': '11.0.0.1'}} + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}, def network_api_list_floating_ips(self, context): return [{'id': 1, 'address': '10.10.10.10', - 'instance': {'id': 11}, - 'fixed_ip': {'address': '10.0.0.1'}}, + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}, {'id': 2, 'address': '10.10.10.11'}] @@ -60,10 +61,38 @@ def compute_api_associate(self, context, instance_id, floating_ip): pass +def network_api_associate(self, context, floating_ip, fixed_ip): + pass + + def network_api_disassociate(self, context, floating_address): pass +def network_get_instance_nw_info(self, context, instance): + info = { + 'label': 'fake', + 'gateway': 'fake', + 'dhcp_server': 'fake', + 'broadcast': 'fake', + 'mac': 'fake', + 'vif_uuid': 'fake', + 'rxtx_cap': 'fake', + 'dns': [], + 'ips': [{'ip': '10.0.0.1'}], + 'should_create_bridge': False, + 'should_create_vlan': False} + + return [['ignore', info]] + + +def fake_instance_get(context, instance_id): + return { + "id": 1, + "user_id": 'fakeuser', + "project_id": '123'} + + class FloatingIpTest(test.TestCase): address = "10.10.10.10" @@ -79,23 +108,21 @@ class FloatingIpTest(test.TestCase): def setUp(self): super(FloatingIpTest, self).setUp() - self.controller = FloatingIPController() - fakes.stub_out_networking(self.stubs) - fakes.stub_out_rate_limiting(self.stubs) self.stubs.Set(network.api.API, "get_floating_ip", network_api_get_floating_ip) self.stubs.Set(network.api.API, "get_floating_ip_by_ip", network_api_get_floating_ip) self.stubs.Set(network.api.API, "list_floating_ips", network_api_list_floating_ips) - self.stubs.Set(network.api.API, "allocate_floating_ip", - network_api_allocate) self.stubs.Set(network.api.API, "release_floating_ip", network_api_release) - self.stubs.Set(compute.api.API, "associate_floating_ip", - compute_api_associate) 
self.stubs.Set(network.api.API, "disassociate_floating_ip", network_api_disassociate) + self.stubs.Set(network.api.API, "get_instance_nw_info", + network_get_instance_nw_info) + self.stubs.Set(db.api, 'instance_get', + fake_instance_get) + self.context = context.get_admin_context() self._create_floating_ip() @@ -124,7 +151,7 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'instance_id': 11, + response = {'floating_ips': [{'instance_id': 1, 'ip': '10.10.10.10', 'fixed_ip': '10.0.0.1', 'id': 1}, @@ -143,7 +170,34 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') self.assertEqual(res_dict['floating_ip']['instance_id'], None) + def test_show_associated_floating_ip(self): + def get_floating_ip(self, context, id): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}} + self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip) + + req = webob.Request.blank('/v1.1/123/os-floating-ips/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['floating_ip']['id'], 1) + self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') + self.assertEqual(res_dict['floating_ip']['instance_id'], 1) + + def test_floating_ip_allocate_no_free_ips(self): + def fake_call(*args, **kwargs): + raise(rpc.RemoteError('NoMoreFloatingIps', '', '')) + + self.stubs.Set(rpc, "call", fake_call) + req = webob.Request.blank('/v1.1/123/os-floating-ips') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_floating_ip_allocate(self): + self.stubs.Set(network.api.API, "allocate_floating_ip", + network_api_allocate) req = 
webob.Request.blank('/v1.1/123/os-floating-ips') req.method = 'POST' req.headers['Content-Type'] = 'application/json' @@ -165,6 +219,8 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res.status_int, 202) def test_add_floating_ip_to_instance(self): + self.stubs.Set(network.api.API, "associate_floating_ip", + network_api_associate) body = dict(addFloatingIp=dict(address='11.0.0.1')) req = webob.Request.blank('/v1.1/123/servers/test_inst/action') req.method = "POST" @@ -174,6 +230,65 @@ class FloatingIpTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 202) + def test_associate_floating_ip_to_instance_wrong_project_id(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + self.stubs.Set(db.api, "fixed_ip_get_by_address", + fake_fixed_ip_get_by_address) + db.floating_ip_update(self.context, self.address, {'project_id': 'bad', + 'fixed_ip_id': 1}) + body = dict(addFloatingIp=dict(address=self.address)) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_associate_floating_ip_to_instance_no_project_id(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + self.stubs.Set(db.api, "fixed_ip_get_by_address", + fake_fixed_ip_get_by_address) + db.floating_ip_update(self.context, self.address, {'project_id': None, + 'fixed_ip_id': 1}) + body = dict(addFloatingIp=dict(address=self.address)) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + resp = req.get_response(fakes.wsgi_app()) + 
self.assertEqual(resp.status_int, 400) + + def test_add_associated_floating_ip_to_instance(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + + self.disassociated = False + + def fake_network_api_disassociate(local_self, ctx, floating_address): + self.disassociated = True + + db.floating_ip_update(self.context, self.address, {'project_id': '123', + 'fixed_ip_id': 1}) + self.stubs.Set(network.api.API, "disassociate_floating_ip", + fake_network_api_disassociate) + self.stubs.Set(db.api, "fixed_ip_get_by_address", + fake_fixed_ip_get_by_address) + + body = dict(addFloatingIp=dict(address=self.address)) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) + self.assertTrue(self.disassociated) + def test_remove_floating_ip_from_instance(self): body = dict(removeFloatingIp=dict(address='11.0.0.1')) req = webob.Request.blank('/v1.1/123/servers/test_inst/action') diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py index bc1536911..0816a6312 100644 --- a/nova/tests/api/openstack/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -360,7 +360,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_by_invalid_server_id(self): body = dict(addSecurityGroup=dict(name='test')) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/invalid/action') req.headers['Content-Type'] = 'application/json' @@ -372,7 +372,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_without_body(self): req = 
webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=None) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -382,7 +382,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_no_security_group_name(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=dict()) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -392,7 +392,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_security_group_name_with_whitespaces(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=dict(name=" ")) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -400,9 +400,9 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_associate_non_existing_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) body = dict(addSecurityGroup=dict(name="test")) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/10000/action') req.headers['Content-Type'] = 'application/json' @@ -412,8 +412,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 404) def test_associate_non_running_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_non_running_server) - self.stubs.Set(nova.db, 
'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) body = dict(addSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -424,8 +424,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_associate_already_associated_security_group_to_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) body = dict(addSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -436,12 +436,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_associate(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_add_security_group') - nova.db.instance_add_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group') + nova.db.api.instance_add_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) self.mox.ReplayAll() @@ -454,12 +454,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 202) def test_associate_xml(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_add_security_group') - nova.db.instance_add_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 
'instance_add_security_group') + nova.db.api.instance_add_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) self.mox.ReplayAll() @@ -483,7 +483,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_by_invalid_server_id(self): body = dict(removeSecurityGroup=dict(name='test')) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/invalid/action') req.headers['Content-Type'] = 'application/json' @@ -495,7 +495,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_without_body(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=None) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -505,7 +505,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_no_security_group_name(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=dict()) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -515,7 +515,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_security_group_name_with_whitespaces(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=dict(name=" ")) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ 
-523,9 +523,9 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate_non_existing_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) body = dict(removeSecurityGroup=dict(name="test")) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/10000/action') req.headers['Content-Type'] = 'application/json' @@ -535,8 +535,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 404) def test_disassociate_non_running_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_non_running_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) body = dict(removeSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -547,8 +547,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate_already_associated_security_group_to_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) body = dict(removeSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -559,12 +559,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group') - 
nova.db.instance_remove_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group') + nova.db.api.instance_remove_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) self.mox.ReplayAll() @@ -577,12 +577,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 202) def test_disassociate_xml(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group') - nova.db.instance_remove_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group') + nova.db.api.instance_remove_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) self.mox.ReplayAll() diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py new file mode 100644 index 000000000..2430b9d51 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -0,0 +1,172 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime +import json +import webob + +from nova import context +from nova import flags +from nova import test +from nova.compute import api +from nova.tests.api.openstack import fakes + + +FLAGS = flags.FLAGS + +SERVERS = 5 +TENANTS = 2 +HOURS = 24 +LOCAL_GB = 10 +MEMORY_MB = 1024 +VCPUS = 2 +STOP = datetime.datetime.utcnow() +START = STOP - datetime.timedelta(hours=HOURS) + + +def fake_instance_type_get(self, context, instance_type_id): + return {'id': 1, + 'vcpus': VCPUS, + 'local_gb': LOCAL_GB, + 'memory_mb': MEMORY_MB, + 'name': + 'fakeflavor'} + + +def get_fake_db_instance(start, end, instance_id, tenant_id): + return {'id': instance_id, + 'image_ref': '1', + 'project_id': tenant_id, + 'user_id': 'fakeuser', + 'display_name': 'name', + 'state_description': 'state', + 'instance_type_id': 1, + 'launched_at': start, + 'terminated_at': end} + + +def fake_instance_get_active_by_window(self, context, begin, end, project_id): + return [get_fake_db_instance(START, + STOP, + x, + "faketenant_%s" % (x / SERVERS)) + for x in xrange(TENANTS * SERVERS)] + + +class SimpleTenantUsageTest(test.TestCase): + def setUp(self): + super(SimpleTenantUsageTest, self).setUp() + self.stubs.Set(api.API, "get_instance_type", + fake_instance_type_get) + self.stubs.Set(api.API, "get_active_by_window", + fake_instance_get_active_by_window) + self.admin_context = context.RequestContext('fakeadmin_0', + 'faketenant_0', + is_admin=True) + self.user_context = context.RequestContext('fakeadmin_0', + 'faketenant_0', + is_admin=False) + self.alt_user_context = context.RequestContext('fakeadmin_0', + 'faketenant_1', + is_admin=False) + FLAGS.allow_admin_api = True + + def test_verify_index(self): + req = webob.Request.blank( + '/v1.1/123/os-simple-tenant-usage?start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = 
"application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + usages = res_dict['tenant_usages'] + from nova import log as logging + logging.warn(usages) + for i in xrange(TENANTS): + self.assertEqual(int(usages[i]['total_hours']), + SERVERS * HOURS) + self.assertEqual(int(usages[i]['total_local_gb_usage']), + SERVERS * LOCAL_GB * HOURS) + self.assertEqual(int(usages[i]['total_memory_mb_usage']), + SERVERS * MEMORY_MB * HOURS) + self.assertEqual(int(usages[i]['total_vcpus_usage']), + SERVERS * VCPUS * HOURS) + self.assertFalse(usages[i].get('server_usages')) + + def test_verify_detailed_index(self): + req = webob.Request.blank( + '/v1.1/123/os-simple-tenant-usage?' + 'detailed=1&start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + usages = res_dict['tenant_usages'] + for i in xrange(TENANTS): + servers = usages[i]['server_usages'] + for j in xrange(SERVERS): + self.assertEqual(int(servers[j]['hours']), HOURS) + + def test_verify_index_fails_for_nonadmin(self): + req = webob.Request.blank( + '/v1.1/123/os-simple-tenant-usage?' 
+ 'detailed=1&start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 403) + + def test_verify_show(self): + req = webob.Request.blank( + '/v1.1/faketenant_0/os-simple-tenant-usage/' + 'faketenant_0?start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + + usage = res_dict['tenant_usage'] + servers = usage['server_usages'] + self.assertEqual(len(usage['server_usages']), SERVERS) + for j in xrange(SERVERS): + self.assertEqual(int(servers[j]['hours']), HOURS) + + def test_verify_show_cant_view_other_tenant(self): + req = webob.Request.blank( + '/v1.1/faketenant_1/os-simple-tenant-usage/' + 'faketenant_0?start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.alt_user_context)) + self.assertEqual(res.status_int, 403) diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py new file mode 100644 index 000000000..311b6cb8d --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -0,0 +1,450 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import stubout +import unittest +import webob + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import volume +from nova import vsa +from nova.api import openstack +from nova.tests.api.openstack import fakes +import nova.wsgi + +from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view + +FLAGS = flags.FLAGS + +LOG = logging.getLogger('nova.tests.api.openstack.vsa') + +last_param = {} + + +def _get_default_vsa_param(): + return { + 'display_name': 'Test_VSA_name', + 'display_description': 'Test_VSA_description', + 'vc_count': 1, + 'instance_type': 'm1.small', + 'instance_type_id': 5, + 'image_name': None, + 'availability_zone': None, + 'storage': [], + 'shared': False + } + + +def stub_vsa_create(self, context, **param): + global last_param + LOG.debug(_("_create: param=%s"), param) + param['id'] = 123 + param['name'] = 'Test name' + param['instance_type_id'] = 5 + last_param = param + return param + + +def stub_vsa_delete(self, context, vsa_id): + global last_param + last_param = dict(vsa_id=vsa_id) + + LOG.debug(_("_delete: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + +def stub_vsa_get(self, context, vsa_id): + global last_param + last_param = dict(vsa_id=vsa_id) + + LOG.debug(_("_get: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + param = _get_default_vsa_param() + param['id'] = vsa_id + return param + + +def stub_vsa_get_all(self, context): + 
LOG.debug(_("_get_all: %s"), locals()) + param = _get_default_vsa_param() + param['id'] = 123 + return [param] + + +class VSAApiTest(test.TestCase): + def setUp(self): + super(VSAApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(vsa.api.API, "create", stub_vsa_create) + self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) + self.stubs.Set(vsa.api.API, "get", stub_vsa_get) + self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAApiTest, self).tearDown() + + def test_vsa_create(self): + global last_param + last_param = {} + + vsa = {"displayName": "VSA Test Name", + "displayDescription": "VSA Test Desc"} + body = dict(vsa=vsa) + req = webob.Request.blank('/v1.1/777/zadr-vsa') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + # Compare if parameters were correctly passed to stub + self.assertEqual(last_param['display_name'], "VSA Test Name") + self.assertEqual(last_param['display_description'], "VSA Test Desc") + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName']) + self.assertEqual(resp_dict['vsa']['displayDescription'], + vsa['displayDescription']) + + def test_vsa_create_no_body(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa') + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + + def test_vsa_delete(self): + global last_param + last_param = {} + + vsa_id = 
123 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_delete_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_show(self): + global last_param + last_param = {} + + vsa_id = 123 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['id'], str(vsa_id)) + + def test_vsa_show_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_index(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + def test_vsa_detail(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/detail') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + 
self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + +def _get_default_volume_param(): + return { + 'id': 123, + 'status': 'available', + 'size': 100, + 'availability_zone': 'nova', + 'created_at': None, + 'attach_status': 'detached', + 'name': 'vol name', + 'display_name': 'Default vol name', + 'display_description': 'Default vol description', + 'volume_type_id': 1, + 'volume_metadata': [], + } + + +def stub_get_vsa_volume_type(self, context): + return {'id': 1, + 'name': 'VSA volume type', + 'extra_specs': {'type': 'vsa_volume'}} + + +def stub_volume_create(self, context, size, snapshot_id, name, description, + **param): + LOG.debug(_("_create: param=%s"), size) + vol = _get_default_volume_param() + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + return vol + + +def stub_volume_update(self, context, **param): + LOG.debug(_("_volume_update: param=%s"), param) + pass + + +def stub_volume_delete(self, context, **param): + LOG.debug(_("_volume_delete: param=%s"), param) + pass + + +def stub_volume_get(self, context, volume_id): + LOG.debug(_("_volume_get: volume_id=%s"), volume_id) + vol = _get_default_volume_param() + vol['id'] = volume_id + meta = {'key': 'from_vsa_id', 'value': '123'} + if volume_id == '345': + meta = {'key': 'to_vsa_id', 'value': '123'} + vol['volume_metadata'].append(meta) + return vol + + +def stub_volume_get_notfound(self, context, volume_id): + raise exception.NotFound + + +def stub_volume_get_all(self, context, search_opts): + vol = stub_volume_get(self, context, '123') + vol['metadata'] = search_opts['metadata'] + return [vol] + + +def return_vsa(context, vsa_id): + return {'id': vsa_id} + + +class VSAVolumeApiTest(test.TestCase): + + def setUp(self, test_obj=None, test_objs=None): + super(VSAVolumeApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + 
fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(nova.db.api, 'vsa_get', return_vsa) + self.stubs.Set(vsa.api.API, "get_vsa_volume_type", + stub_get_vsa_volume_type) + + self.stubs.Set(volume.api.API, "update", stub_volume_update) + self.stubs.Set(volume.api.API, "delete", stub_volume_delete) + self.stubs.Set(volume.api.API, "get", stub_volume_get) + self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all) + + self.context = context.get_admin_context() + self.test_obj = test_obj if test_obj else "volume" + self.test_objs = test_objs if test_objs else "volumes" + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAVolumeApiTest, self).tearDown() + + def test_vsa_volume_create(self): + self.stubs.Set(volume.api.API, "create", stub_volume_create) + + vol = {"size": 100, + "displayName": "VSA Volume Test Name", + "displayDescription": "VSA Volume Test Desc"} + body = {self.test_obj: vol} + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + resp = req.get_response(fakes.wsgi_app()) + + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + self.assertTrue(self.test_obj in resp_dict) + self.assertEqual(resp_dict[self.test_obj]['size'], + vol['size']) + self.assertEqual(resp_dict[self.test_obj]['displayName'], + vol['displayName']) + self.assertEqual(resp_dict[self.test_obj]['displayDescription'], + vol['displayDescription']) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_create_no_body(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = 
req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 422) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_index(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_detail(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \ + self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_show_no_volume(self): + self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + + def test_vsa_volume_update(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + update = {"status": "available", + "displayName": "Test Display name"} + body = {self.test_obj: update} + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'PUT' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = 
webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete_no_volume(self): + self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 404) + else: + self.assertEqual(resp.status_int, 400) + + +class VSADriveApiTest(VSAVolumeApiTest): + def setUp(self): + super(VSADriveApiTest, self).setUp(test_obj="drive", + test_objs="drives") + + def tearDown(self): + self.stubs.UnsetAll() + super(VSADriveApiTest, self).tearDown() diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index a095dd90a..44681d395 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -107,13 +107,20 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] + def one_key_pair(context, user_id, name): + if name == 'key': + return dict(name='key', public_key='public_key') + else: + raise exc.KeypairNotFound(user_id=user_id, name=name) + def no_key_pair(context, user_id): return [] if have_key_pair: - stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair) + stubs.Set(nova.db.api, 'key_pair_get_all_by_user', key_pair) + stubs.Set(nova.db.api, 'key_pair_get', one_key_pair) else: - stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair) + 
stubs.Set(nova.db.api, 'key_pair_get_all_by_user', no_key_pair) def stub_out_image_service(stubs): diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index c78588d65..31443242b 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -95,6 +95,8 @@ class ExtensionControllerTest(test.TestCase): "Quotas", "Rescue", "SecurityGroups", + "SimpleTenantUsage", + "VSAs", "VirtualInterfaces", "Volumes", "VolumeTypes", diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 3dfdeb79c..b9ef41465 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -10,8 +10,8 @@ from nova import utils from nova import exception from nova import flags from nova.api.openstack import create_instance_helper +from nova.compute import vm_states from nova.compute import instance_types -from nova.compute import power_state import nova.db.api from nova import test from nova.tests.api.openstack import common @@ -35,17 +35,19 @@ def return_server_with_attributes(**kwargs): return _return_server -def return_server_with_power_state(power_state): - return return_server_with_attributes(power_state=power_state) +def return_server_with_state(vm_state, task_state=None): + return return_server_with_attributes(vm_state=vm_state, + task_state=task_state) -def return_server_with_uuid_and_power_state(power_state): - return return_server_with_power_state(power_state) - +def return_server_with_uuid_and_state(vm_state, task_state=None): + def _return_server(context, id): + return return_server_with_state(vm_state, task_state) + return _return_server -def stub_instance(id, power_state=0, metadata=None, - image_ref="10", flavor_id="1", name=None): +def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", + name=None, vm_state=None, task_state=None): if metadata is not None: 
metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()] else: @@ -66,8 +68,8 @@ def stub_instance(id, power_state=0, metadata=None, "launch_index": 0, "key_name": "", "key_data": "", - "state": power_state, - "state_description": "", + "vm_state": vm_state or vm_states.ACTIVE, + "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -175,11 +177,11 @@ class ServerActionsTest(test.TestCase): }, } - state = power_state.BUILDING - new_return_server = return_server_with_power_state(state) + state = vm_states.BUILDING + new_return_server = return_server_with_state(state) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) + return_server_with_uuid_and_state(state)) req = webob.Request.blank('/v1.0/servers/1/action') req.method = 'POST' @@ -242,19 +244,6 @@ class ServerActionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 500) - def test_resized_server_has_correct_status(self): - req = self.webreq('/1', 'GET') - - def fake_migration_get(*args): - return {} - - self.stubs.Set(nova.db, 'migration_get_by_instance_and_status', - fake_migration_get) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - body = json.loads(res.body) - self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM') - def test_confirm_resize_server(self): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) @@ -642,11 +631,11 @@ class ServerActionsTestV11(test.TestCase): }, } - state = power_state.BUILDING - new_return_server = return_server_with_power_state(state) + state = vm_states.BUILDING + new_return_server = return_server_with_state(state) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) + return_server_with_uuid_and_state(state)) req = 
webob.Request.blank('/v1.1/fake/servers/1/action') req.method = 'POST' diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 87ae19f56..00640c094 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -37,7 +37,8 @@ from nova.api.openstack import wsgi from nova.api.openstack import xmlutil import nova.compute.api from nova.compute import instance_types -from nova.compute import power_state +from nova.compute import task_states +from nova.compute import vm_states import nova.db.api import nova.scheduler.api from nova.db.sqlalchemy.models import Instance @@ -91,15 +92,18 @@ def return_server_with_addresses(private, public): return _return_server -def return_server_with_power_state(power_state): +def return_server_with_state(vm_state, task_state=None): def _return_server(context, id): - return stub_instance(id, power_state=power_state) + return stub_instance(id, vm_state=vm_state, task_state=task_state) return _return_server -def return_server_with_uuid_and_power_state(power_state): +def return_server_with_uuid_and_state(vm_state, task_state): def _return_server(context, id): - return stub_instance(id, uuid=FAKE_UUID, power_state=power_state) + return stub_instance(id, + uuid=FAKE_UUID, + vm_state=vm_state, + task_state=task_state) return _return_server @@ -148,9 +152,10 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, - public_addresses=None, host=None, power_state=0, + public_addresses=None, host=None, + vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", - flavor_id="1", interfaces=None, name=None, + flavor_id="1", interfaces=None, name=None, key_name='', access_ipv4=None, access_ipv6=None): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -166,6 +171,11 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, if 
host is not None: host = str(host) + if key_name: + key_data = 'FAKE' + else: + key_data = '' + # ReservationID isn't sent back, hack it in there. server_name = name or "server%s" % id if reservation_id != "": @@ -182,10 +192,10 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, - "key_name": "", - "key_data": "", - "state": power_state, - "state_description": "", + "key_name": key_name, + "key_data": key_data, + "vm_state": vm_state or vm_states.BUILDING, + "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -345,6 +355,7 @@ class ServersTest(test.TestCase): "accessIPv4": "", "accessIPv6": "", "hostId": '', + "key_name": '', "image": { "id": "10", "links": [ @@ -456,7 +467,7 @@ class ServersTest(test.TestCase): }, ] new_return_server = return_server_with_attributes( - interfaces=interfaces, power_state=1) + interfaces=interfaces, vm_state=vm_states.ACTIVE) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) req = webob.Request.blank('/v1.1/fake/servers/1') @@ -474,6 +485,7 @@ class ServersTest(test.TestCase): "accessIPv4": "", "accessIPv6": "", "hostId": '', + "key_name": '', "image": { "id": "10", "links": [ @@ -549,8 +561,8 @@ class ServersTest(test.TestCase): }, ] new_return_server = return_server_with_attributes( - interfaces=interfaces, power_state=1, image_ref=image_ref, - flavor_id=flavor_id) + interfaces=interfaces, vm_state=vm_states.ACTIVE, + image_ref=image_ref, flavor_id=flavor_id) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) req = webob.Request.blank('/v1.1/fake/servers/1') @@ -568,6 +580,7 @@ class ServersTest(test.TestCase): "accessIPv4": "", "accessIPv6": "", "hostId": '', + "key_name": '', "image": { "id": "10", "links": [ @@ -1171,9 +1184,8 @@ class ServersTest(test.TestCase): def test_get_servers_allows_status_v1_1(self): def fake_get_all(compute_self, context, search_opts=None): self.assertNotEqual(search_opts, 
None) - self.assertTrue('state' in search_opts) - self.assertEqual(set(search_opts['state']), - set([power_state.RUNNING, power_state.BLOCKED])) + self.assertTrue('vm_state' in search_opts) + self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE) return [stub_instance(100)] self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) @@ -1190,13 +1202,9 @@ class ServersTest(test.TestCase): def test_get_servers_invalid_status_v1_1(self): """Test getting servers by invalid status""" - self.flags(allow_admin_api=False) - req = webob.Request.blank('/v1.1/fake/servers?status=running') res = req.get_response(fakes.wsgi_app()) - # The following assert will fail if either of the asserts in - # fake_get_all() fail self.assertEqual(res.status_int, 400) self.assertTrue(res.body.find('Invalid server status') > -1) @@ -1219,6 +1227,31 @@ class ServersTest(test.TestCase): self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], 100) + def test_get_servers_allows_changes_since_v1_1(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('changes-since' in search_opts) + changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1) + self.assertEqual(search_opts['changes-since'], changes_since) + self.assertTrue('deleted' not in search_opts) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + params = 'changes-since=2011-01-24T17:08:01Z' + req = webob.Request.blank('/v1.1/fake/servers?%s' % params) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_allows_changes_since_bad_value_v1_1(self): + params = 'changes-since=asdf' + req = webob.Request.blank('/v1.1/fake/servers?%s' % params) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def 
test_get_servers_unknown_or_admin_options1(self): """Test getting servers by admin-only or unknown options. This tests when admin_api is off. Make sure the admin and @@ -1700,6 +1733,7 @@ class ServersTest(test.TestCase): server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual(1, server['id']) + self.assertEqual("BUILD", server["status"]) self.assertEqual(0, server['progress']) self.assertEqual('server_test', server['name']) self.assertEqual(expected_flavor, server['flavor']) @@ -1707,6 +1741,36 @@ class ServersTest(test.TestCase): self.assertEqual('1.2.3.4', server['accessIPv4']) self.assertEqual('fead::1234', server['accessIPv6']) + def test_create_instance_v1_1_invalid_key_name(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/flavors/3' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + key_name='nonexistentkey')) + req = webob.Request.blank('/v1.1/fake/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_1_valid_key_name(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/flavors/3' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + key_name='key')) + req = webob.Request.blank('/v1.1/fake/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + def test_create_instance_v1_1_invalid_flavor_href(self): self._setup_for_create_instance() @@ -2429,23 +2493,51 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 204) self.assertEqual(self.server_delete_called, 
True) - def test_shutdown_status(self): - new_server = return_server_with_power_state(power_state.SHUTDOWN) - self.stubs.Set(nova.db.api, 'instance_get', new_server) - req = webob.Request.blank('/v1.0/servers/1') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['status'], 'SHUTDOWN') - def test_shutoff_status(self): - new_server = return_server_with_power_state(power_state.SHUTOFF) +class TestServerStatus(test.TestCase): + + def _get_with_state(self, vm_state, task_state=None): + new_server = return_server_with_state(vm_state, task_state) self.stubs.Set(nova.db.api, 'instance_get', new_server) - req = webob.Request.blank('/v1.0/servers/1') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['status'], 'SHUTOFF') + request = webob.Request.blank('/v1.0/servers/1') + response = request.get_response(fakes.wsgi_app()) + self.assertEqual(response.status_int, 200) + return json.loads(response.body) + + def test_active(self): + response = self._get_with_state(vm_states.ACTIVE) + self.assertEqual(response['server']['status'], 'ACTIVE') + + def test_reboot(self): + response = self._get_with_state(vm_states.ACTIVE, + task_states.REBOOTING) + self.assertEqual(response['server']['status'], 'REBOOT') + + def test_rebuild(self): + response = self._get_with_state(vm_states.REBUILDING) + self.assertEqual(response['server']['status'], 'REBUILD') + + def test_rebuild_error(self): + response = self._get_with_state(vm_states.ERROR) + self.assertEqual(response['server']['status'], 'ERROR') + + def test_resize(self): + response = self._get_with_state(vm_states.RESIZING) + self.assertEqual(response['server']['status'], 'RESIZE') + + def test_verify_resize(self): + response = self._get_with_state(vm_states.ACTIVE, + task_states.RESIZE_VERIFY) + 
self.assertEqual(response['server']['status'], 'VERIFY_RESIZE') + + def test_password_update(self): + response = self._get_with_state(vm_states.ACTIVE, + task_states.UPDATING_PASSWORD) + self.assertEqual(response['server']['status'], 'PASSWORD') + + def test_stopped(self): + response = self._get_with_state(vm_states.STOPPED) + self.assertEqual(response['server']['status'], 'STOPPED') class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase): @@ -3203,6 +3295,7 @@ class TestServerInstanceCreation(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.db = db def create(self, *args, **kwargs): if 'injected_files' in kwargs: @@ -3510,8 +3603,8 @@ class ServersViewBuilderV11Test(test.TestCase): "launch_index": 0, "key_name": "", "key_data": "", - "state": 0, - "state_description": "", + "vm_state": vm_states.BUILDING, + "task_state": None, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -3561,6 +3654,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", + "key_name": '', "links": [ { "rel": "self", @@ -3584,6 +3678,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", + "key_name": '', "config_drive": None, "links": [ { @@ -3617,6 +3712,7 @@ class ServersViewBuilderV11Test(test.TestCase): "accessIPv4": "", "accessIPv6": "", "hostId": '', + "key_name": '', "image": { "id": "5", "links": [ @@ -3656,7 +3752,7 @@ class ServersViewBuilderV11Test(test.TestCase): def test_build_server_detail_active_status(self): #set the power state of the instance to running - self.instance['state'] = 1 + self.instance['vm_state'] = vm_states.ACTIVE image_bookmark = "http://localhost/images/5" flavor_bookmark = "http://localhost/flavors/1" expected_server = { @@ -3671,6 +3767,7 @@ class ServersViewBuilderV11Test(test.TestCase): "accessIPv4": "", "accessIPv6": "", "hostId": '', + "key_name": '', "image": { "id": "5", "links": [ 
@@ -3722,6 +3819,7 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + "key_name": "", "status": "BUILD", "hostId": '', "image": { @@ -3777,6 +3875,7 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + "key_name": "", "status": "BUILD", "hostId": '', "image": { @@ -3839,6 +3938,7 @@ class ServersViewBuilderV11Test(test.TestCase): "accessIPv4": "", "accessIPv6": "", "hostId": '', + "key_name": '', "image": { "id": "5", "links": [ @@ -3982,6 +4082,7 @@ class ServerXMLSerializationTest(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "key_name": '', "accessIPv4": "1.2.3.4", "accessIPv6": "fead::1234", "image": { diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 0ff508ffa..b1ebd8436 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -20,6 +20,7 @@ import datetime import unittest from nova import context +from nova import exception from nova import test from nova.image import glance @@ -38,7 +39,16 @@ class StubGlanceClient(object): return self.images[image_id] def get_images_detailed(self, filters=None, marker=None, limit=None): - return self.images.itervalues() + images = self.images.values() + if marker is None: + index = 0 + else: + for index, image in enumerate(images): + if image['id'] == marker: + index += 1 + break + # default to a page size of 3 to ensure we flex the pagination code + return images[index:index + 3] def get_image(self, image_id): return self.images[image_id], [] @@ -86,23 +96,48 @@ class TestGlanceImageServiceProperties(BaseGlanceTest): """Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the properties dict """ - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, 'foo': 'bar', 
'properties': {'prop1': 'propvalue1'}}} self.client.images = fixtures image_meta = self.service.show(self.context, 'image1') + expected = {'id': '1', 'name': 'image1', 'is_public': True, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + self.assertEqual(image_meta, expected) + + def test_show_raises_when_no_authtoken_in_the_context(self): + fixtures = {'image1': {'name': 'image1', 'is_public': False, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + self.context.auth_token = False + expected = {'name': 'image1', 'is_public': True, 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + self.assertRaises(exception.ImageNotFound, + self.service.show, self.context, 'image1') + + def test_show_passes_through_to_client_with_authtoken_in_context(self): + fixtures = {'image1': {'name': 'image1', 'is_public': False, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + self.context.auth_token = True + + expected = {'name': 'image1', 'is_public': False, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + + image_meta = self.service.show(self.context, 'image1') self.assertEqual(image_meta, expected) def test_detail_passes_through_to_client(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': {'prop1': 'propvalue1'}}} self.client.images = fixtures image_meta = self.service.detail(self.context) - expected = [{'name': 'image1', 'is_public': True, + expected = [{'id': '1', 'name': 'image1', 'is_public': True, 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}] self.assertEqual(image_meta, expected) @@ -166,6 +201,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): def _make_datetime_fixtures(self): fixtures = { 'image1': { + 'id': '1', 'name': 'image1', 'is_public': True, 'created_at': self.NOW_GLANCE_FORMAT, @@ -173,6 +209,7 @@ class 
TestGetterDateTimeNoneTests(BaseGlanceTest): 'deleted_at': self.NOW_GLANCE_FORMAT, }, 'image2': { + 'id': '2', 'name': 'image2', 'is_public': True, 'created_at': self.NOW_GLANCE_OLD_FORMAT, @@ -183,13 +220,17 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): return fixtures def _make_none_datetime_fixtures(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', + 'name': 'image1', + 'is_public': True, 'updated_at': None, 'deleted_at': None}} return fixtures def _make_blank_datetime_fixtures(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', + 'name': 'image1', + 'is_public': True, 'updated_at': '', 'deleted_at': ''}} return fixtures diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index c2f800689..2cf604d06 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -28,6 +28,17 @@ LOG = logging.getLogger('nova.tests.integrated') class ServersTest(integrated_helpers._IntegratedTestBase): + def _wait_for_creation(self, server): + retries = 0 + while server['status'] == 'BUILD': + time.sleep(1) + server = self.api.get_server(server['id']) + print server + retries = retries + 1 + if retries > 5: + break + return server + def test_get_servers(self): """Simple check that listing servers works.""" servers = self.api.get_servers() @@ -36,9 +47,9 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_delete_server(self): """Creates and deletes a server.""" + self.flags(stub_network=True) # Create server - # Build the server data gradually, checking errors along the way server = {} good_server = self._build_minimal_create_server_request() @@ -91,19 +102,11 @@ class ServersTest(integrated_helpers._IntegratedTestBase): server_ids = [server['id'] for server in servers] self.assertTrue(created_server_id in server_ids) - # Wait (briefly) for creation - retries = 0 - 
while found_server['status'] == 'build': - LOG.debug("found server: %s" % found_server) - time.sleep(1) - found_server = self.api.get_server(created_server_id) - retries = retries + 1 - if retries > 5: - break + found_server = self._wait_for_creation(found_server) # It should be available... # TODO(justinsb): Mock doesn't yet do this... - #self.assertEqual('available', found_server['status']) + self.assertEqual('ACTIVE', found_server['status']) servers = self.api.get_servers(detail=True) for server in servers: self.assertTrue("image" in server) @@ -181,6 +184,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server(self): """Rebuild a server.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -190,10 +194,12 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = self._wait_for_creation(created_server) + # rebuild the server with metadata post = {} post['rebuild'] = { - "imageRef": "https://localhost/v1.1/32278/images/2", + "imageRef": "https://localhost/v1.1/32278/images/3", "name": "blah", } @@ -205,12 +211,14 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertEqual(created_server_id, found_server['id']) self.assertEqual({}, found_server.get('metadata')) self.assertEqual('blah', found_server.get('name')) + self.assertEqual('3', found_server.get('image')['id']) # Cleanup self._delete_server(created_server_id) def test_create_and_rebuild_server_with_metadata(self): """Rebuild a server with metadata.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -220,6 +228,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = 
self._wait_for_creation(created_server) + # rebuild the server with metadata post = {} post['rebuild'] = { @@ -247,6 +257,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server_with_metadata_removal(self): """Rebuild a server with metadata.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -263,6 +274,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = self._wait_for_creation(created_server) + # rebuild the server with metadata post = {} post['rebuild'] = { diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 158df2a27..a52dd041a 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -40,6 +40,7 @@ from nova.scheduler import driver from nova.scheduler import manager from nova.scheduler import multi from nova.compute import power_state +from nova.compute import vm_states FLAGS = flags.FLAGS @@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase): inst['vcpus'] = kwargs.get('vcpus', 1) inst['memory_mb'] = kwargs.get('memory_mb', 10) inst['local_gb'] = kwargs.get('local_gb', 20) + inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) + inst['task_state'] = kwargs.get('task_state', None) return db.instance_create(ctxt, inst) def test_fallback(self): @@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase): inst['memory_mb'] = kwargs.get('memory_mb', 20) inst['local_gb'] = kwargs.get('local_gb', 30) inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) + inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE) + 
inst['task_state'] = kwargs.get('task_state', None) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) return db.instance_create(self.context, inst)['id'] def _create_volume(self): @@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase): block_migration=False) i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') + self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATING) db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) def test_live_migration_src_check_instance_not_running(self): """The instance given by instance_id is not running.""" - instance_id = self._create_instance(state_description='migrating') + instance_id = self._create_instance(power_state=power_state.NOSTATE) i_ref = db.instance_get(self.context, instance_id) try: diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py new file mode 100644 index 000000000..37964f00d --- /dev/null +++ b/nova/tests/scheduler/test_vsa_scheduler.py @@ -0,0 +1,641 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import stubout + +import nova + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import utils +from nova.volume import volume_types + +from nova.scheduler import vsa as vsa_sched +from nova.scheduler import driver + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.scheduler.vsa') + +scheduled_volumes = [] +scheduled_volume = {} +global_volume = {} + + +class FakeVsaLeastUsedScheduler( + vsa_sched.VsaSchedulerLeastUsedHost): + # No need to stub anything at the moment + pass + + +class FakeVsaMostAvailCapacityScheduler( + vsa_sched.VsaSchedulerMostAvailCapacity): + # No need to stub anything at the moment + pass + + +class VsaSchedulerTestCase(test.TestCase): + + def _get_vol_creation_request(self, num_vols, drive_ix, size=0): + volume_params = [] + for i in range(num_vols): + + name = 'name_' + str(i) + try: + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + except exception.ApiError: + # type is already created + pass + + volume_type = volume_types.get_volume_type_by_name(self.context, + name) + volume = {'size': size, + 'snapshot_id': None, + 'name': 'vol_' + str(i), + 'description': None, + 'volume_type_id': volume_type['id']} + volume_params.append(volume) + + return {'num_volumes': len(volume_params), + 'vsa_id': 123, + 'volumes': volume_params} + + def _generate_default_service_states(self): + service_states = {} + for i in range(self.host_num): + host = {} + hostname = 'host_' + str(i) + if hostname in self.exclude_host_list: + continue + + host['volume'] = {'timestamp': utils.utcnow(), + 'drive_qos_info': {}} + + for j in range(self.drive_type_start_ix, + self.drive_type_start_ix + self.drive_type_num): + dtype = {} + dtype['Name'] = 'name_' + str(j) + 
dtype['DriveType'] = 'type_' + str(j) + dtype['TotalDrives'] = 2 * (self.init_num_drives + i) + dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j) + dtype['TotalCapacity'] = dtype['TotalDrives'] * \ + dtype['DriveCapacity'] + dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \ + dtype['DriveCapacity'] + dtype['DriveRpm'] = 7200 + dtype['DifCapable'] = 0 + dtype['SedCapable'] = 0 + dtype['PartitionDrive'] = { + 'PartitionSize': 0, + 'NumOccupiedPartitions': 0, + 'NumFreePartitions': 0} + dtype['FullDrive'] = { + 'NumFreeDrives': dtype['TotalDrives'] - i, + 'NumOccupiedDrives': i} + host['volume']['drive_qos_info'][dtype['Name']] = dtype + + service_states[hostname] = host + + return service_states + + def _print_service_states(self): + for host, host_val in self.service_states.iteritems(): + LOG.info(_("Host %s"), host) + total_used = 0 + total_available = 0 + qos = host_val['volume']['drive_qos_info'] + + for k, d in qos.iteritems(): + LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\ + "size %3d, total %4d, used %4d, avail %d", + k, d['DriveType'], + d['FullDrive']['NumOccupiedDrives'], d['TotalDrives'], + vsa_sched.BYTES_TO_GB(d['DriveCapacity']), + vsa_sched.BYTES_TO_GB(d['TotalCapacity']), + vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \ + d['AvailableCapacity']), + vsa_sched.BYTES_TO_GB(d['AvailableCapacity'])) + + total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \ + d['AvailableCapacity']) + total_available += vsa_sched.BYTES_TO_GB( + d['AvailableCapacity']) + LOG.info("Host %s: used %d, avail %d", + host, total_used, total_available) + + def _set_service_states(self, host_num, + drive_type_start_ix, drive_type_num, + init_num_drives=10, + exclude_host_list=[]): + self.host_num = host_num + self.drive_type_start_ix = drive_type_start_ix + self.drive_type_num = drive_type_num + self.exclude_host_list = exclude_host_list + self.init_num_drives = init_num_drives + self.service_states = self._generate_default_service_states() + + def 
_get_service_states(self): + return self.service_states + + def _fake_get_service_states(self): + return self._get_service_states() + + def _fake_provision_volume(self, context, vol, vsa_id, availability_zone): + global scheduled_volumes + scheduled_volumes.append(dict(vol=vol, + vsa_id=vsa_id, + az=availability_zone)) + name = vol['name'] + host = vol['host'] + LOG.debug(_("Test: provision vol %(name)s on host %(host)s"), + locals()) + LOG.debug(_("\t vol=%(vol)s"), locals()) + pass + + def _fake_vsa_update(self, context, vsa_id, values): + LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\ + "values=%(values)s"), locals()) + pass + + def _fake_volume_create(self, context, options): + LOG.debug(_("Test: Volume create: %s"), options) + options['id'] = 123 + global global_volume + global_volume = options + return options + + def _fake_volume_get(self, context, volume_id): + LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals()) + global global_volume + global_volume['id'] = volume_id + global_volume['availability_zone'] = None + return global_volume + + def _fake_volume_update(self, context, volume_id, values): + LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\ + "values=%(values)s"), locals()) + global scheduled_volume + scheduled_volume = {'id': volume_id, 'host': values['host']} + pass + + def _fake_service_get_by_args(self, context, host, binary): + return "service" + + def _fake_service_is_up_True(self, service): + return True + + def _fake_service_is_up_False(self, service): + return False + + def setUp(self, sched_class=None): + super(VsaSchedulerTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.context = context.get_admin_context() + + if sched_class is None: + self.sched = FakeVsaLeastUsedScheduler() + else: + self.sched = sched_class + + self.host_num = 10 + self.drive_type_num = 5 + + self.stubs.Set(self.sched, + '_get_service_states', self._fake_get_service_states) + self.stubs.Set(self.sched, + 
'_provision_volume', self._fake_provision_volume) + self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update) + + self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get) + self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update) + + self.created_types_lst = [] + + def tearDown(self): + for name in self.created_types_lst: + volume_types.purge(self.context, name) + + self.stubs.UnsetAll() + super(VsaSchedulerTestCase, self).tearDown() + + def test_vsa_sched_create_volumes_simple(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_1', 'host_3']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4') + + cur = self._get_service_states() + for host in ['host_0', 'host_2', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_2'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_2'] + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + def test_vsa_sched_no_drive_type(self): + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6) + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + 
request_spec, + availability_zone=None) + + def test_vsa_sched_no_enough_drives(self): + global scheduled_volumes + scheduled_volumes = [] + + self._set_service_states(host_num=3, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=0) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + # check that everything was returned back + cur = self._get_service_states() + for k, v in prev.iteritems(): + self.assertEqual(prev[k]['volume']['drive_qos_info'], + cur[k]['volume']['drive_qos_info']) + + def test_vsa_sched_wrong_topic(self): + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1) + states = self._get_service_states() + new_states = {} + new_states['host_0'] = {'compute': states['host_0']['volume']} + self.service_states = new_states + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone=None) + + def test_vsa_sched_provision_volume(self): + global global_volume + global_volume = {} + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.stubs.UnsetAll() + self.stubs.Set(self.sched, + '_get_service_states', self._fake_get_service_states) + self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(request_spec['volumes'][0]['name'], + global_volume['display_name']) + + def test_vsa_sched_no_free_drives(self): + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=1, + 
init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + cur = self._get_service_states() + cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0'] + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1) + + new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + self._print_service_states() + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + new_request, + availability_zone=None) + + def test_vsa_sched_forced_host(self): + global scheduled_volumes + scheduled_volumes = [] + + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10) + + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self.assertRaises(exception.HostBinaryNotFound, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone="nova:host_5") + + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_False) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone="nova:host_5") + + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_True) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone="nova:host_5") + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5') + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + 
self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) + + def test_vsa_sched_create_single_volume_az(self): + global scheduled_volume + scheduled_volume = {} + + def _fake_volume_get_az(context, volume_id): + LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals()) + return {'id': volume_id, 'availability_zone': 'nova:host_3'} + + self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az) + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', 
self._fake_service_is_up_True) + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_3') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_3') + + def test_vsa_sched_create_single_non_vsa_volume(self): + global scheduled_volume + scheduled_volume = {} + + global global_volume + global_volume = {} + global_volume['volume_type_id'] = None + + self.assertRaises(driver.NoValidHost, + self.sched.schedule_create_volume, + self.context, + 123, + availability_zone=None) + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_2') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_2') + + +class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase): + + def setUp(self): + super(VsaSchedulerTestCaseMostAvail, self).setUp( + FakeVsaMostAvailCapacityScheduler()) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaSchedulerTestCaseMostAvail, self).tearDown() + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + 
self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_9') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_9') + + def test_vsa_sched_create_volumes_simple(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_1', 'host_3']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self._print_service_states() + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7') + + cur = self._get_service_states() + for host in ['host_9', 'host_8', 'host_7']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_2'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_2'] + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + 
prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py index 06cc498ac..aaa633adc 100644 --- a/nova/tests/test_adminapi.py +++ b/nova/tests/test_adminapi.py @@ -38,8 +38,6 @@ class 
AdminApiTestCase(test.TestCase): super(AdminApiTestCase, self).setUp() self.flags(connection_type='fake') - self.conn = rpc.create_connection() - # set up our cloud self.api = admin.AdminController() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..3fe6a9b42 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -38,6 +38,7 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils +from nova.compute import vm_states from nova.image import fake @@ -51,8 +52,6 @@ class CloudTestCase(test.TestCase): self.flags(connection_type='fake', stub_network=True) - self.conn = rpc.create_connection() - # set up our cloud self.cloud = cloud.CloudController() @@ -86,13 +85,6 @@ class CloudTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) - def tearDown(self): - networks = db.project_get_networks(self.context, self.project_id, - associate=False) - for network in networks: - db.network_disassociate(self.context, network['id']) - super(CloudTestCase, self).tearDown() - def _create_key(self, name): # NOTE(vish): create depends on pool, so just call helper directly return cloud._gen_key(self.context, self.context.user_id, name) @@ -494,8 +486,11 @@ class CloudTestCase(test.TestCase): inst2 = db.instance_create(self.context, args2) db.instance_destroy(self.context, inst1.id) result = self.cloud.describe_instances(self.context) - result = result['reservationSet'][0]['instancesSet'] - self.assertEqual(result[0]['instanceId'], + result1 = result['reservationSet'][0]['instancesSet'] + self.assertEqual(result1[0]['instanceId'], + ec2utils.id_to_ec2_id(inst1.id)) + result2 = result['reservationSet'][1]['instancesSet'] + self.assertEqual(result2[0]['instanceId'], ec2utils.id_to_ec2_id(inst2.id)) def _block_device_mapping_create(self, instance_id, mappings): @@ -1163,7 +1158,7 @@ class CloudTestCase(test.TestCase): self.compute = self.start_service('compute') def 
_wait_for_state(self, ctxt, instance_id, predicate): - """Wait for an stopping instance to be a given state""" + """Wait for a stopped instance to be a given state""" id = ec2utils.ec2_id_to_id(instance_id) while True: info = self.cloud.compute_api.get(context=ctxt, instance_id=id) @@ -1174,12 +1169,16 @@ class CloudTestCase(test.TestCase): def _wait_for_running(self, instance_id): def is_running(info): - return info['state_description'] == 'running' + vm_state = info["vm_state"] + task_state = info["task_state"] + return vm_state == vm_states.ACTIVE and task_state == None self._wait_for_state(self.context, instance_id, is_running) def _wait_for_stopped(self, instance_id): def is_stopped(info): - return info['state_description'] == 'stopped' + vm_state = info["vm_state"] + task_state = info["task_state"] + return vm_state == vm_states.STOPPED and task_state == None self._wait_for_state(self.context, instance_id, is_stopped) def _wait_for_terminate(self, instance_id): @@ -1562,7 +1561,7 @@ class CloudTestCase(test.TestCase): 'id': 0, 'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'state_description': 'stopping', + 'vm_state': vm_states.STOPPED, 'instance_type': {'name': 'fake_type'}, 'kernel_id': 1, 'ramdisk_id': 2, @@ -1606,7 +1605,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(groupSet, expected_groupSet) self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), {'instance_id': 'i-12345678', - 'instanceInitiatedShutdownBehavior': 'stop'}) + 'instanceInitiatedShutdownBehavior': 'stopped'}) self.assertEqual(get_attribute('instanceType'), {'instance_id': 'i-12345678', 'instanceType': 'fake_type'}) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 6659b81eb..766a7da9b 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -24,6 +24,7 @@ from nova import compute from nova.compute import instance_types from nova.compute import manager as compute_manager from 
nova.compute import power_state +from nova.compute import vm_states from nova import context from nova import db from nova.db.sqlalchemy import models @@ -763,8 +764,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE, + 'task_state': None, 'host': i_ref['host']}) for v in i_ref['volumes']: dbmock.volume_update(c, v['id'], {'status': 'in-use'}) @@ -795,8 +796,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE, + 'task_state': None, 'host': i_ref['host']}) self.compute.db = dbmock @@ -841,8 +842,8 @@ class ComputeTestCase(test.TestCase): c = context.get_admin_context() instance_id = self._create_instance() i_ref = db.instance_get(c, instance_id) - db.instance_update(c, i_ref['id'], {'state_description': 'migrating', - 'state': power_state.PAUSED}) + db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING, + 'power_state': power_state.PAUSED}) v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id}) fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1', 'instance_id': instance_id}) @@ -903,7 +904,7 @@ class ComputeTestCase(test.TestCase): instances = db.instance_get_all(context.get_admin_context()) LOG.info(_("After force-killing instances: %s"), instances) self.assertEqual(len(instances), 1) - self.assertEqual(power_state.SHUTOFF, instances[0]['state']) + self.assertEqual(power_state.NOSTATE, instances[0]['power_state']) def test_get_all_by_name_regexp(self): """Test searching instances by name (display_name)""" @@ -1323,25 +1324,28 @@ class 
ComputeTestCase(test.TestCase): """Test searching instances by state""" c = context.get_admin_context() - instance_id1 = self._create_instance({'state': power_state.SHUTDOWN}) + instance_id1 = self._create_instance({ + 'power_state': power_state.SHUTDOWN, + }) instance_id2 = self._create_instance({ - 'id': 2, - 'state': power_state.RUNNING}) + 'id': 2, + 'power_state': power_state.RUNNING, + }) instance_id3 = self._create_instance({ - 'id': 10, - 'state': power_state.RUNNING}) - + 'id': 10, + 'power_state': power_state.RUNNING, + }) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SUSPENDED}) + search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SHUTDOWN}) + search_opts={'power_state': power_state.SHUTDOWN}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0].id, instance_id1) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.RUNNING}) + search_opts={'power_state': power_state.RUNNING}) self.assertEqual(len(instances), 2) instance_ids = [instance.id for instance in instances] self.assertTrue(instance_id2 in instance_ids) @@ -1349,7 +1353,7 @@ class ComputeTestCase(test.TestCase): # Test passing a list as search arg instances = self.compute_api.get_all(c, - search_opts={'state': [power_state.SHUTDOWN, + search_opts={'power_state': [power_state.SHUTDOWN, power_state.RUNNING]}) self.assertEqual(len(instances), 3) diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py new file mode 100644 index 000000000..b2507fa59 --- /dev/null +++ b/nova/tests/test_context.py @@ -0,0 +1,33 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import context +from nova import test + + +class ContextTestCase(test.TestCase): + + def test_request_context_sets_is_admin(self): + ctxt = context.RequestContext('111', + '222', + roles=['admin', 'weasel']) + self.assertEquals(ctxt.is_admin, True) + + def test_request_context_sets_is_admin_upcase(self): + ctxt = context.RequestContext('111', + '222', + roles=['Admin', 'weasel']) + self.assertEquals(ctxt.is_admin, True) diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 038c07f40..60d7abd8c 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -91,5 +91,7 @@ class DbApiTestCase(test.TestCase): inst2 = db.instance_create(self.context, args2) db.instance_destroy(self.context, inst1.id) result = db.instance_get_all_by_filters(self.context.elevated(), {}) - self.assertEqual(1, len(result)) + self.assertEqual(2, len(result)) self.assertEqual(result[0].id, inst2.id) + self.assertEqual(result[1].id, inst1.id) + self.assertTrue(result[1].deleted) diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 04c1b5598..e1ba4aafb 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase): def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, - bad_prefix, + bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 6a213b4f0..8c6775b29 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -34,6 +34,7 
@@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.compute import power_state +from nova.compute import vm_states from nova.virt.libvirt import connection from nova.virt.libvirt import firewall @@ -674,8 +675,9 @@ class LibvirtConnTestCase(test.TestCase): # Preparing data self.compute = utils.import_object(FLAGS.compute_manager) - instance_dict = {'host': 'fake', 'state': power_state.RUNNING, - 'state_description': 'running'} + instance_dict = {'host': 'fake', + 'power_state': power_state.RUNNING, + 'vm_state': vm_states.ACTIVE} instance_ref = db.instance_create(self.context, self.test_instance) instance_ref = db.instance_update(self.context, instance_ref['id'], instance_dict) @@ -713,8 +715,8 @@ class LibvirtConnTestCase(test.TestCase): self.compute.rollback_live_migration) instance_ref = db.instance_get(self.context, instance_ref['id']) - self.assertTrue(instance_ref['state_description'] == 'running') - self.assertTrue(instance_ref['state'] == power_state.RUNNING) + self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE) + self.assertTrue(instance_ref['power_state'] == power_state.RUNNING) volume_ref = db.volume_get(self.context, volume_ref['id']) self.assertTrue(volume_ref['status'] == 'in-use') diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 0b8539442..25ff940f0 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -371,6 +371,22 @@ class VlanNetworkTestCase(test.TestCase): self.mox.ReplayAll() self.network.validate_networks(self.context, requested_networks) + def test_cant_associate_associated_floating_ip(self): + ctxt = context.RequestContext('testuser', 'testproject', + is_admin=False) + + def fake_floating_ip_get_by_address(context, address): + return {'address': '10.10.10.10', + 'fixed_ip': {'address': '10.0.0.1'}} + self.stubs.Set(self.network.db, 'floating_ip_get_by_address', + fake_floating_ip_get_by_address) + + 
self.assertRaises(exception.FloatingIpAlreadyInUse, + self.network.associate_floating_ip, + ctxt, + mox.IgnoreArg(), + mox.IgnoreArg()) + class CommonNetworkTestCase(test.TestCase): diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ba9c0a859..6b4454747 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -22,168 +22,16 @@ Unit Tests for remote procedure calls using queue from nova import context from nova import log as logging from nova import rpc -from nova import test +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcTestCase(test.TestCase): +class RpcTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = rpc super(RpcTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() - self.context = context.get_admin_context() - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": 
"echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. - - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. 
- ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - consumer = rpc.create_consumer(conn, - 'nested', - nested, - False) - consumer.attach_to_eventlet() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) + def tearDown(self): + super(RpcTestCase, self).tearDown() diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py deleted file mode 100644 index 2215a908b..000000000 --- a/nova/tests/test_rpc_amqp.py +++ /dev/null @@ -1,88 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Openstack, LLC. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests For RPC AMQP. -""" - -from nova import context -from nova import log as logging -from nova import rpc -from nova.rpc import amqp -from nova import test - - -LOG = logging.getLogger('nova.tests.rpc') - - -class RpcAMQPTestCase(test.TestCase): - def setUp(self): - super(RpcAMQPTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() - self.context = context.get_admin_context() - - def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection.""" - conn1 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn1) - conn2 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn2) - self.assertEqual(conn1, conn2) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. 
- - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py new file mode 100644 index 000000000..57cdebf4f --- /dev/null +++ b/nova/tests/test_rpc_carrot.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for remote procedure calls using carrot +""" + +from nova import context +from nova import log as logging +from nova.rpc import impl_carrot +from nova.tests import test_rpc_common + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcCarrotTestCase(test_rpc_common._BaseRpcTestCase): + def setUp(self): + self.rpc = impl_carrot + super(RpcCarrotTestCase, self).setUp() + + def tearDown(self): + super(RpcCarrotTestCase, self).tearDown() + + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection.""" + conn1 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn1) + conn2 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py new file mode 100644 index 000000000..4ab4e8a0e --- /dev/null +++ b/nova/tests/test_rpc_common.py @@ -0,0 +1,189 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for remote procedure calls shared between all implementations +""" + +from nova import context +from nova import log as logging +from nova.rpc.common import RemoteError +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class _BaseRpcTestCase(test.TestCase): + def setUp(self): + super(_BaseRpcTestCase, self).setUp() + self.conn = self.rpc.create_connection(True) + self.receiver = TestReceiver() + self.conn.create_consumer('test', self.receiver, False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(_BaseRpcTestCase, self).tearDown() + + def test_call_succeed(self): + value = 42 + result = self.rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = self.rpc.call(self.context, 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = self.rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": 
value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = self.rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. + + """ + value = 42 + self.assertRaises(RemoteError, + self.rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + self.rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown RemoteError") + except RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = self.rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = self.rpc.create_connection(True) + conn.create_consumer('nested', nested, False) + conn.consume_in_thread() + value = 42 + result = self.rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. 
+ + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py new file mode 100644 index 000000000..101ed14af --- /dev/null +++ b/nova/tests/test_rpc_kombu.py @@ -0,0 +1,110 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for remote procedure calls using kombu +""" + +from nova import context +from nova import log as logging +from nova import test +from nova.rpc import impl_kombu +from nova.tests import test_rpc_common + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcKombuTestCase(test_rpc_common._BaseRpcTestCase): + def setUp(self): + self.rpc = impl_kombu + super(RpcKombuTestCase, self).setUp() + + def tearDown(self): + super(RpcKombuTestCase, self).tearDown() + + def test_reusing_connection(self): + """Test that reusing a connection returns same one.""" + conn_context = self.rpc.create_connection(new=False) + conn1 = conn_context.connection + conn_context.close() + conn_context = self.rpc.create_connection(new=False) + conn2 = conn_context.connection + conn_context.close() + self.assertEqual(conn1, conn2) + + def test_topic_send_receive(self): + """Test sending to a topic exchange/queue""" + + conn = self.rpc.create_connection() + message = 'topic test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_topic_consumer('a_topic', _callback) + conn.topic_send('a_topic', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + def test_direct_send_receive(self): + """Test sending to a direct exchange/queue""" + conn = self.rpc.create_connection() + message = 'direct test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_direct_consumer('a_direct', _callback) + conn.direct_send('a_direct', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + @test.skip_test("kombu memory transport seems buggy with fanout queues " + "as this test passes when you use rabbit (fake_rabbit=False)") + def test_fanout_send_receive(self): + """Test sending to a fanout exchange and consuming from 2 queues""" + + conn = 
self.rpc.create_connection() + conn2 = self.rpc.create_connection() + message = 'fanout test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_fanout_consumer('a_fanout', _callback) + conn2.declare_fanout_consumer('a_fanout', _callback) + conn.fanout_send('a_fanout', message) + + conn.consume(limit=1) + conn.close() + self.assertEqual(self.received_message, message) + + self.received_message = None + conn2.consume(limit=1) + conn2.close() + self.assertEqual(self.received_message, message) diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index 64f11fa45..3482ff6a0 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -40,6 +40,5 @@ class IsolationTestCase(test.TestCase): connection = rpc.create_connection(new=True) proxy = NeverCalled() - consumer = rpc.create_consumer(connection, 'compute', - proxy, fanout=False) - consumer.attach_to_eventlet() + connection.create_consumer('compute', proxy, fanout=False) + connection.consume_in_thread() diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py new file mode 100644 index 000000000..3d2d2de13 --- /dev/null +++ b/nova/tests/test_vsa.py @@ -0,0 +1,182 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import base64 +import stubout + +from xml.etree import ElementTree +from xml.etree.ElementTree import Element, SubElement + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova import vsa +from nova import volume +from nova.volume import volume_types +from nova.vsa import utils as vsa_utils + +import nova.image.fake + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa') + + +class VsaTestCase(test.TestCase): + + def setUp(self): + super(VsaTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.vsa_api = vsa.API() + self.volume_api = volume.API() + + FLAGS.quota_volumes = 100 + FLAGS.quota_gigabytes = 10000 + + self.context = context.get_admin_context() + + volume_types.create(self.context, + 'SATA_500_7200', + extra_specs={'type': 'vsa_drive', + 'drive_name': 'SATA_500_7200', + 'drive_type': 'SATA', + 'drive_size': '500', + 'drive_rpm': '7200'}) + + def fake_show_by_name(meh, context, name): + if name == 'wrong_image_name': + LOG.debug(_("Test: Emulate wrong VSA name. 
Raise")) + raise exception.ImageNotFound + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(nova.image.fake._FakeImageService, + 'show_by_name', + fake_show_by_name) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaTestCase, self).tearDown() + + def test_vsa_create_delete_defaults(self): + param = {'display_name': 'VSA name test'} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['display_name'], param['display_name']) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_delete_check_in_db(self): + vsa_list1 = self.vsa_api.get_all(self.context) + vsa_ref = self.vsa_api.create(self.context) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + + self.vsa_api.delete(self.context, vsa_ref['id']) + vsa_list3 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list3), len(vsa_list2) - 1) + + def test_vsa_create_delete_high_vc_count(self): + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_wrong_image_name(self): + param = {'image_name': 'wrong_image_name'} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_db_error(self): + + def fake_vsa_create(context, options): + LOG.debug(_("Test: Emulate DB error. 
Raise")) + raise exception.Error + + self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create) + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context) + + def test_vsa_create_wrong_storage_params(self): + vsa_list1 = self.vsa_api.get_all(self.context) + param = {'storage': [{'stub': 1}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1)) + + param = {'storage': [{'drive_name': 'wrong name'}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_with_storage(self, multi_vol_creation=True): + """Test creation of VSA with BE storage""" + + FLAGS.vsa_multi_vol_creation = multi_vol_creation + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 3) + self.vsa_api.delete(self.context, vsa_ref['id']) + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}], + 'shared': True} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 15) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_with_storage_single_volumes(self): + self.test_vsa_create_with_storage(multi_vol_creation=False) + + def test_vsa_update(self): + vsa_ref = self.vsa_api.create(self.context) + + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + + param = {'vc_count': 2} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], 2) + + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_generate_user_data(self): + + FLAGS.vsa_multi_vol_creation = False + param = {'display_name': 'VSA name test', + 
'display_description': 'VSA desc test', + 'vc_count': 2, + 'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + volumes = self.vsa_api.get_all_vsa_drives(self.context, + vsa_ref['id']) + + user_data = vsa_utils.generate_user_data(vsa_ref, volumes) + user_data = base64.b64decode(user_data) + + LOG.debug(_("Test: user_data = %s"), user_data) + + elem = ElementTree.fromstring(user_data) + self.assertEqual(elem.findtext('name'), + param['display_name']) + self.assertEqual(elem.findtext('description'), + param['display_description']) + self.assertEqual(elem.findtext('vc_count'), + str(param['vc_count'])) + + self.vsa_api.delete(self.context, vsa_ref['id']) diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py new file mode 100644 index 000000000..b7cd4e840 --- /dev/null +++ b/nova/tests/test_vsa_volumes.py @@ -0,0 +1,136 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import stubout + +from nova import exception +from nova import flags +from nova import vsa +from nova import volume +from nova import db +from nova import context +from nova import test +from nova import log as logging +import nova.image.fake + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa.volumes') + + +class VsaVolumesTestCase(test.TestCase): + + def setUp(self): + super(VsaVolumesTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.vsa_api = vsa.API() + self.volume_api = volume.API() + self.context = context.get_admin_context() + + self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context) + + def fake_show_by_name(meh, context, name): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(nova.image.fake._FakeImageService, + 'show_by_name', + fake_show_by_name) + + param = {'display_name': 'VSA name test'} + vsa_ref = self.vsa_api.create(self.context, **param) + self.vsa_id = vsa_ref['id'] + + def tearDown(self): + if self.vsa_id: + self.vsa_api.delete(self.context, self.vsa_id) + self.stubs.UnsetAll() + super(VsaVolumesTestCase, self).tearDown() + + def _default_volume_param(self): + return { + 'size': 1, + 'snapshot_id': None, + 'name': 'Test volume name', + 'description': 'Test volume desc name', + 'volume_type': self.default_vol_type, + 'metadata': {'from_vsa_id': self.vsa_id} + } + + def _get_all_volumes_by_vsa(self): + return self.volume_api.get_all(self.context, + search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}}) + + def test_vsa_volume_create_delete(self): + """ Check if volume properly created and deleted. 
""" + volume_param = self._default_volume_param() + volume_ref = self.volume_api.create(self.context, **volume_param) + + self.assertEqual(volume_ref['display_name'], + volume_param['name']) + self.assertEqual(volume_ref['display_description'], + volume_param['description']) + self.assertEqual(volume_ref['size'], + volume_param['size']) + self.assertEqual(volume_ref['status'], + 'creating') + + vols2 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols2[0] + + self.assertEqual(volume_ref['display_name'], + volume_param['name']) + self.assertEqual(volume_ref['display_description'], + volume_param['description']) + self.assertEqual(volume_ref['size'], + volume_param['size']) + self.assertEqual(volume_ref['status'], + 'creating') + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'available'}) + self.volume_api.delete(self.context, volume_ref['id']) + + vols3 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols3[0] + self.assertEqual(volume_ref['status'], + 'deleting') + + def test_vsa_volume_delete_nonavail_volume(self): + """ Check volume deleton in different states. """ + volume_param = self._default_volume_param() + volume_ref = self.volume_api.create(self.context, **volume_param) + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'in-use'}) + self.assertRaises(exception.ApiError, + self.volume_api.delete, + self.context, volume_ref['id']) + + def test_vsa_volume_delete_vsa_with_volumes(self): + """ Check volume deleton in different states. 
""" + + vols1 = self._get_all_volumes_by_vsa() + for i in range(3): + volume_param = self._default_volume_param() + volume_ref = self.volume_api.create(self.context, **volume_param) + + vols2 = self._get_all_volumes_by_vsa() + self.assertEqual(len(vols1) + 3, len(vols2)) + + self.vsa_api.delete(self.context, self.vsa_id) + + vols3 = self._get_all_volumes_by_vsa() + self.assertEqual(len(vols1), len(vols3)) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..45dad3516 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -16,7 +16,6 @@ """Test suite for XenAPI.""" -import eventlet import functools import json import os @@ -203,42 +202,6 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) - def test_parallel_builds(self): - stubs.stubout_loopingcall_delay(self.stubs) - - def _do_build(id, proj, user, *args): - values = { - 'id': id, - 'project_id': proj, - 'user_id': user, - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] - instance = db.instance_create(self.context, values) - self.conn.spawn(self.context, instance, network_info) - - gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id) - gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id) - gt1.wait() - gt2.wait() - def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEquals(instances, []) diff --git 
a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index afd672c7a..0d896239a 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -23,6 +23,8 @@ import time from nova import db
from nova import utils
+from nova.compute import task_states
+from nova.compute import vm_states
def stub_out_db_instance_api(stubs):
@@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs): 'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
- 'state_description': 'scheduling',
+ 'vm_state': vm_states.BUILDING,
+ 'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index a6a1febd6..647a4c1df 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -18,6 +18,8 @@ import eventlet import json +import random + from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -191,6 +193,7 @@ class FakeSessionForVMTests(fake.SessionBase): vm['power_state'] = 'Running' vm['is_a_template'] = False vm['is_control_domain'] = False + vm['domid'] = random.randrange(1, 1 << 16) def VM_snapshot(self, session_ref, vm_ref, label): status = "Running" @@ -290,6 +293,7 @@ class FakeSessionForMigrationTests(fake.SessionBase): vm['power_state'] = 'Running' vm['is_a_template'] = False vm['is_control_domain'] = False + vm['domid'] = random.randrange(1, 1 << 16) def stub_out_migration_methods(stubs): diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index 6a02cfa24..d3aeadda4 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -135,7 +135,9 @@ <interface type='bridge'> <source bridge='${nic.bridge_name}'/> <mac address='${nic.mac_address}'/> - <!-- <model type='virtio'/> CANT RUN virtio network right now --> +#if $getVar('use_virtio_for_bridges', True) + <model type='virtio'/> +#end if <filterref filter="nova-instance-${name}-${nic.id}"> <parameter name="IP" value="${nic.ip_address}" /> <parameter name="DHCPSERVER" value="${nic.dhcp_server}" /> diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 4388291db..363a20ed0 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -135,6 +135,9 @@ flags.DEFINE_string('default_local_format', None, 'The default format a local_volume will be formatted with ' 'on creation.') +flags.DEFINE_bool('libvirt_use_virtio_for_bridges', + False, + 'Use virtio for bridge interfaces') def get_connection(read_only): @@ -1083,6 +1086,8 @@ class 
LibvirtConnection(driver.ComputeDriver): 'ebs_root': ebs_root, 'local_device': local_device, 'volumes': block_device_mapping, + 'use_virtio_for_bridges': + FLAGS.libvirt_use_virtio_for_bridges, 'ephemerals': ephemerals} root_device_name = driver.block_device_info_get_root(block_device_info) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 7c91aa9b9..97dfd9fa9 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -51,6 +51,7 @@ A fake XenAPI SDK. """ +import random import uuid from pprint import pformat @@ -103,8 +104,10 @@ def create_network(name_label, bridge): def create_vm(name_label, status, is_a_template=False, is_control_domain=False): + domid = status == 'Running' and random.randrange(1, 1 << 16) or -1 return _create_object('VM', {'name_label': name_label, + 'domid': domid, 'power-state': status, 'is_a_template': is_a_template, 'is_control_domain': is_control_domain}) diff --git a/nova/volume/api.py b/nova/volume/api.py index 195ab24aa..d9c082514 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -132,7 +132,7 @@ class API(base.Base): for i in volume.get('volume_metadata'): volume_metadata[i['key']] = i['value'] - for k, v in searchdict: + for k, v in searchdict.iteritems(): if k not in volume_metadata.keys()\ or volume_metadata[k] != v: return False @@ -141,6 +141,7 @@ class API(base.Base): # search_option to filter_name mapping. 
filter_mapping = {'metadata': _check_metadata_match} + result = [] for volume in volumes: # go over all filters in the list for opt, values in search_opts.iteritems(): @@ -150,10 +151,10 @@ class API(base.Base): # no such filter - ignore it, go to next filter continue else: - if filter_func(volume, values) == False: - # if one of conditions didn't match - remove - volumes.remove(volume) + if filter_func(volume, values): + result.append(volume) break + volumes = result return volumes def get_snapshot(self, context, snapshot_id): @@ -255,3 +256,12 @@ class API(base.Base): self.db.volume_metadata_update(context, volume_id, _metadata, True) return _metadata + + def get_volume_metadata_value(self, volume, key): + """Get value of particular metadata key.""" + metadata = volume.get('volume_metadata') + if metadata: + for i in volume['volume_metadata']: + if i['key'] == key: + return i['value'] + return None diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 06e3d7afd..35e3ea8d0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -22,11 +22,13 @@ Drivers for volumes. import time import os +from xml.etree import ElementTree from nova import exception from nova import flags from nova import log as logging from nova import utils +from nova.volume import volume_types LOG = logging.getLogger("nova.volume.driver") @@ -212,6 +214,11 @@ class VolumeDriver(object): """Make sure volume is exported.""" raise NotImplementedError() + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. 
If 'refresh' is + True, run the update first.""" + return None + class AOEDriver(VolumeDriver): """Implements AOE specific volume commands.""" @@ -802,3 +809,268 @@ class LoggingVolumeDriver(VolumeDriver): if match: matches.append(entry) return matches + + +class ZadaraBEDriver(ISCSIDriver): + """Performs actions to configure Zadara BE module.""" + + def _is_vsa_volume(self, volume): + return volume_types.is_vsa_volume(volume['volume_type_id']) + + def _is_vsa_drive(self, volume): + return volume_types.is_vsa_drive(volume['volume_type_id']) + + def _not_vsa_volume_or_drive(self, volume): + """Returns True if volume is not VSA BE volume.""" + if not volume_types.is_vsa_object(volume['volume_type_id']): + LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name']) + return True + else: + return False + + def check_for_setup_error(self): + """No setup necessary for Zadara BE.""" + pass + + """ Volume Driver methods """ + def create_volume(self, volume): + """Creates BE volume.""" + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).create_volume(volume) + + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s creation - do nothing"), + volume['name']) + return + + if int(volume['size']) == 0: + sizestr = '0' # indicates full-partition + else: + sizestr = '%s' % (int(volume['size']) << 30) # size in bytes + + # Set the qos-str to default type sas + qosstr = 'SAS_1000' + volume_type = volume_types.get_volume_type(None, + volume['volume_type_id']) + if volume_type is not None: + qosstr = volume_type['extra_specs']['drive_type'] + \ + ("_%s" % volume_type['extra_specs']['drive_size']) + + vsa_id = None + for i in volume.get('volume_metadata'): + if i['key'] == 'to_vsa_id': + vsa_id = i['value'] + break + + try: + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', + 'create_qospart', + '--qos', qosstr, + '--pname', volume['name'], + '--psize', sizestr, + '--vsaid', vsa_id, + run_as_root=True, + check_exit_code=0) + except 
exception.ProcessExecutionError: + LOG.debug(_("VSA BE create_volume for %s failed"), volume['name']) + raise + + LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name']) + + def delete_volume(self, volume): + """Deletes BE volume.""" + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).delete_volume(volume) + + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"), + volume['name']) + return + + try: + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', + 'delete_partition', + '--pname', volume['name'], + run_as_root=True, + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name']) + return + + LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name']) + + def local_path(self, volume): + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).local_path(volume) + + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s local path call - call discover"), + volume['name']) + return super(ZadaraBEDriver, self).discover_volume(None, volume) + + raise exception.Error(_("local_path not supported")) + + def ensure_export(self, context, volume): + """ensure BE export for a volume""" + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).ensure_export(context, volume) + + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"), + volume['name']) + return + + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping ensure_export. 
No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + try: + ret = self._common_be_export(context, volume, iscsi_target) + except exception.ProcessExecutionError: + return + return ret + + def create_export(self, context, volume): + """create BE export for a volume""" + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).create_export(context, volume) + + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s create export - do nothing"), + volume['name']) + return + + self._ensure_iscsi_targets(context, volume['host']) + iscsi_target = self.db.volume_allocate_iscsi_target(context, + volume['id'], + volume['host']) + try: + ret = self._common_be_export(context, volume, iscsi_target) + except: + raise exception.ProcessExecutionError + return ret + + def remove_export(self, context, volume): + """Removes BE export for a volume.""" + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).remove_export(context, volume) + + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"), + volume['name']) + return + + try: + iscsi_target = self.db.volume_get_iscsi_target_num(context, + volume['id']) + except exception.NotFound: + LOG.info(_("Skipping remove_export. 
No iscsi_target " + + "provisioned for volume: %d"), volume['id']) + return + + try: + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', + 'remove_export', + '--pname', volume['name'], + '--tid', iscsi_target, + run_as_root=True, + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("VSA BE remove_export for %s failed"), volume['name']) + return + + def create_snapshot(self, snapshot): + """Nothing required for snapshot""" + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).create_snapshot(volume) + + pass + + def delete_snapshot(self, snapshot): + """Nothing required to delete a snapshot""" + if self._not_vsa_volume_or_drive(volume): + return super(ZadaraBEDriver, self).delete_snapshot(volume) + + pass + + """ Internal BE Volume methods """ + def _common_be_export(self, context, volume, iscsi_target): + """ + Common logic that asks zadara_sncfg to setup iSCSI target/lun for + this volume + """ + (out, err) = self._sync_exec( + '/var/lib/zadara/bin/zadara_sncfg', + 'create_export', + '--pname', volume['name'], + '--tid', iscsi_target, + run_as_root=True, + check_exit_code=0) + + result_xml = ElementTree.fromstring(out) + response_node = result_xml.find("Sn") + if response_node is None: + msg = "Malformed response from zadara_sncfg" + raise exception.Error(msg) + + sn_ip = response_node.findtext("SnIp") + sn_iqn = response_node.findtext("IqnName") + iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target) + + model_update = {} + model_update['provider_location'] = ("%s %s" % + (iscsi_portal, + sn_iqn)) + return model_update + + def _get_qosgroup_summary(self): + """gets the list of qosgroups from Zadara BE""" + try: + (out, err) = self._sync_exec( + '/var/lib/zadara/bin/zadara_sncfg', + 'get_qosgroups_xml', + run_as_root=True, + check_exit_code=0) + except exception.ProcessExecutionError: + LOG.debug(_("Failed to retrieve QoS info")) + return {} + + qos_groups = {} + result_xml = ElementTree.fromstring(out) + for 
element in result_xml.findall('QosGroup'): + qos_group = {} + # get the name of the group. + # If we cannot find it, forget this element + group_name = element.findtext("Name") + if not group_name: + continue + + # loop through all child nodes & fill up attributes of this group + for child in element.getchildren(): + # two types of elements - property of qos-group & sub property + # classify them accordingly + if child.text: + qos_group[child.tag] = int(child.text) \ + if child.text.isdigit() else child.text + else: + subelement = {} + for subchild in child.getchildren(): + subelement[subchild.tag] = int(subchild.text) \ + if subchild.text.isdigit() else subchild.text + qos_group[child.tag] = subelement + + # Now add this group to the master qos_groups + qos_groups[group_name] = qos_group + + return qos_groups + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. If 'refresh' is + True, run the update first.""" + + drive_info = self._get_qosgroup_summary() + return {'drive_qos_info': drive_info} diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 798bd379a..caa5298d4 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -48,7 +48,9 @@ from nova import exception from nova import flags from nova import log as logging from nova import manager +from nova import rpc from nova import utils +from nova.volume import volume_types LOG = logging.getLogger('nova.volume.manager') @@ -60,6 +62,8 @@ flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver', 'Driver to use for volume creation') flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') +flags.DEFINE_boolean('volume_force_update_capabilities', False, + 'if True will force update capabilities on each check') class VolumeManager(manager.SchedulerDependentManager): @@ -74,6 +78,7 @@ class VolumeManager(manager.SchedulerDependentManager): # NOTE(vish): Implementation specific db handling is done # by 
the driver. self.driver.db = self.db + self._last_volume_stats = [] def init_host(self): """Do any initialization that needs to be run if this is a @@ -123,6 +128,7 @@ class VolumeManager(manager.SchedulerDependentManager): except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) + self._notify_vsa(context, volume_ref, 'error') raise now = utils.utcnow() @@ -130,8 +136,29 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'available', 'launched_at': now}) LOG.debug(_("volume %s: created successfully"), volume_ref['name']) + self._notify_vsa(context, volume_ref, 'available') + self._reset_stats() return volume_id + def _notify_vsa(self, context, volume_ref, status): + if volume_ref['volume_type_id'] is None: + return + + if volume_types.is_vsa_drive(volume_ref['volume_type_id']): + vsa_id = None + for i in volume_ref.get('volume_metadata'): + if i['key'] == 'to_vsa_id': + vsa_id = int(i['value']) + break + + if vsa_id: + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "vsa_volume_created", + "args": {"vol_id": volume_ref['id'], + "vsa_id": vsa_id, + "status": status}}) + def delete_volume(self, context, volume_id): """Deletes and unexports volume.""" context = context.elevated() @@ -141,6 +168,7 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) + self._reset_stats() try: LOG.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) @@ -231,3 +259,53 @@ class VolumeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) for volume in instance_ref['volumes']: self.driver.check_for_export(context, volume['id']) + + def periodic_tasks(self, context=None): + """Tasks to be run at a periodic interval.""" + + error_list = [] + try: + self._report_driver_status() + except Exception as ex: + 
LOG.warning(_("Error during report_driver_status(): %s"), + unicode(ex)) + error_list.append(ex) + + super(VolumeManager, self).periodic_tasks(context) + + return error_list + + def _volume_stats_changed(self, stat1, stat2): + if FLAGS.volume_force_update_capabilities: + return True + if len(stat1) != len(stat2): + return True + for (k, v) in stat1.iteritems(): + if (k, v) not in stat2.iteritems(): + return True + return False + + def _report_driver_status(self): + volume_stats = self.driver.get_volume_stats(refresh=True) + if volume_stats: + LOG.info(_("Checking volume capabilities")) + + if self._volume_stats_changed(self._last_volume_stats, + volume_stats): + LOG.info(_("New capabilities found: %s"), volume_stats) + self._last_volume_stats = volume_stats + + # This will grab info about the host and queue it + # to be sent to the Schedulers. + self.update_service_capabilities(self._last_volume_stats) + else: + # avoid repeating fanouts + self.update_service_capabilities(None) + + def _reset_stats(self): + LOG.info(_("Clear capabilities")) + self._last_volume_stats = [] + + def notification(self, context, event): + LOG.info(_("Notification {%s} received"), event) + self._reset_stats() diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py index 9b02d4ccc..ffa9e6e02 100644 --- a/nova/volume/volume_types.py +++ b/nova/volume/volume_types.py @@ -100,20 +100,22 @@ def get_all_types(context, inactive=0, search_opts={}): continue else: if filter_func(type_args, values): - # if one of conditions didn't match - remove result[type_name] = type_args break vol_types = result return vol_types -def get_volume_type(context, id): +def get_volume_type(ctxt, id): """Retrieves single volume type by id.""" if id is None: raise exception.InvalidVolumeType(volume_type=id) + if ctxt is None: + ctxt = context.get_admin_context() + try: - return db.volume_type_get(context, id) + return db.volume_type_get(ctxt, id) except exception.DBError: raise 
exception.ApiError(_("Unknown volume type: %s") % id) @@ -127,3 +129,38 @@ def get_volume_type_by_name(context, name): return db.volume_type_get_by_name(context, name) except exception.DBError: raise exception.ApiError(_("Unknown volume type: %s") % name) + + +def is_key_value_present(volume_type_id, key, value, volume_type=None): + if volume_type_id is None: + return False + + if volume_type is None: + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + if volume_type.get('extra_specs') is None or\ + volume_type['extra_specs'].get(key) != value: + return False + else: + return True + + +def is_vsa_drive(volume_type_id, volume_type=None): + return is_key_value_present(volume_type_id, + 'type', 'vsa_drive', volume_type) + + +def is_vsa_volume(volume_type_id, volume_type=None): + return is_key_value_present(volume_type_id, + 'type', 'vsa_volume', volume_type) + + +def is_vsa_object(volume_type_id): + if volume_type_id is None: + return False + + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + + return is_vsa_drive(volume_type_id, volume_type) or\ + is_vsa_volume(volume_type_id, volume_type) diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py new file mode 100644 index 000000000..09162e006 --- /dev/null +++ b/nova/vsa/__init__.py @@ -0,0 +1,18 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova.vsa.api import API diff --git a/nova/vsa/api.py b/nova/vsa/api.py new file mode 100644 index 000000000..18cf13705 --- /dev/null +++ b/nova/vsa/api.py @@ -0,0 +1,411 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to Virtual Storage Arrays (VSAs). + +Experimental code. Requires special VSA image. 
+For assistance and guidelines pls contact + Zadara Storage Inc & Openstack community +""" + +import sys + +from nova import compute +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import rpc +from nova import volume +from nova.compute import instance_types +from nova.db import base +from nova.volume import volume_types + + +class VsaState: + CREATING = 'creating' # VSA creating (not ready yet) + LAUNCHING = 'launching' # Launching VCs (all BE volumes were created) + CREATED = 'created' # VSA fully created and ready for use + PARTIAL = 'partial' # Some BE drives were allocated + FAILED = 'failed' # Some BE storage allocations failed + DELETING = 'deleting' # VSA started the deletion procedure + + +FLAGS = flags.FLAGS +flags.DEFINE_string('vsa_ec2_access_key', None, + 'EC2 access key used by VSA for accessing nova') +flags.DEFINE_string('vsa_ec2_user_id', None, + 'User ID used by VSA for accessing nova') +flags.DEFINE_boolean('vsa_multi_vol_creation', True, + 'Ask scheduler to create multiple volumes in one call') +flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type', + 'Name of volume type associated with FE VSA volumes') + +LOG = logging.getLogger('nova.vsa') + + +class API(base.Base): + """API for interacting with the VSA manager.""" + + def __init__(self, compute_api=None, volume_api=None, **kwargs): + self.compute_api = compute_api or compute.API() + self.volume_api = volume_api or volume.API() + super(API, self).__init__(**kwargs) + + def _check_volume_type_correctness(self, vol_type): + if vol_type.get('extra_specs') == None or\ + vol_type['extra_specs'].get('type') != 'vsa_drive' or\ + vol_type['extra_specs'].get('drive_type') == None or\ + vol_type['extra_specs'].get('drive_size') == None: + + raise exception.ApiError(_("Invalid drive type %s") + % vol_type['name']) + + def _get_default_vsa_instance_type(self): + return instance_types.get_instance_type_by_name( + 
FLAGS.default_vsa_instance_type) + + def _check_storage_parameters(self, context, vsa_name, storage, + shared, first_index=0): + """ + Translates storage array of disks to the list of volumes + :param storage: List of dictionaries with following keys: + disk_name, num_disks, size + :param shared: Specifies if storage is dedicated or shared. + For shared storage disks split into partitions + """ + volume_params = [] + for node in storage: + + name = node.get('drive_name', None) + num_disks = node.get('num_drives', 1) + + if name is None: + raise exception.ApiError(_("No drive_name param found in %s") + % node) + try: + vol_type = volume_types.get_volume_type_by_name(context, name) + except exception.NotFound: + raise exception.ApiError(_("Invalid drive type name %s") + % name) + + self._check_volume_type_correctness(vol_type) + + # if size field present - override disk size specified in DB + size = int(node.get('size', + vol_type['extra_specs'].get('drive_size'))) + + if shared: + part_size = FLAGS.vsa_part_size_gb + total_capacity = num_disks * size + num_volumes = total_capacity / part_size + size = part_size + else: + num_volumes = num_disks + size = 0 # special handling for full drives + + for i in range(num_volumes): + volume_name = "drive-%03d" % first_index + first_index += 1 + volume_desc = 'BE volume for VSA %s type %s' % \ + (vsa_name, name) + volume = { + 'size': size, + 'name': volume_name, + 'description': volume_desc, + 'volume_type_id': vol_type['id'], + } + volume_params.append(volume) + + return volume_params + + def create(self, context, display_name='', display_description='', + vc_count=1, instance_type=None, image_name=None, + availability_zone=None, storage=[], shared=None): + """ + Provision VSA instance with corresponding compute instances + and associated volumes + :param storage: List of dictionaries with following keys: + disk_name, num_disks, size + :param shared: Specifies if storage is dedicated or shared. 
+ For shared storage disks split into partitions + """ + + LOG.info(_("*** Experimental VSA code ***")) + + if vc_count > FLAGS.max_vcs_in_vsa: + LOG.warning(_("Requested number of VCs (%d) is too high."\ + " Setting to default"), vc_count) + vc_count = FLAGS.max_vcs_in_vsa + + if instance_type is None: + instance_type = self._get_default_vsa_instance_type() + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + if storage is None: + storage = [] + + if shared is None or shared == 'False' or shared == False: + shared = False + else: + shared = True + + # check if image is ready before starting any work + if image_name is None: + image_name = FLAGS.vc_image_name + try: + image_service = self.compute_api.image_service + vc_image = image_service.show_by_name(context, image_name) + vc_image_href = vc_image['id'] + except exception.ImageNotFound: + raise exception.ApiError(_("Failed to find configured image %s") + % image_name) + + options = { + 'display_name': display_name, + 'display_description': display_description, + 'project_id': context.project_id, + 'availability_zone': availability_zone, + 'instance_type_id': instance_type['id'], + 'image_ref': vc_image_href, + 'vc_count': vc_count, + 'status': VsaState.CREATING, + } + LOG.info(_("Creating VSA: %s") % options) + + # create DB entry for VSA instance + try: + vsa_ref = self.db.vsa_create(context, options) + except exception.Error: + raise exception.ApiError(_(sys.exc_info()[1])) + vsa_id = vsa_ref['id'] + vsa_name = vsa_ref['name'] + + # check storage parameters + try: + volume_params = self._check_storage_parameters(context, vsa_name, + storage, shared) + except exception.ApiError: + self.db.vsa_destroy(context, vsa_id) + raise exception.ApiError(_("Error in storage parameters: %s") + % storage) + + # after creating DB entry, re-check and set some defaults + updates = {} + if (not hasattr(vsa_ref, 'display_name') or + vsa_ref.display_name is None or + vsa_ref.display_name == 
''): + updates['display_name'] = display_name = vsa_name + updates['vol_count'] = len(volume_params) + vsa_ref = self.update(context, vsa_id, **updates) + + # create volumes + if FLAGS.vsa_multi_vol_creation: + if len(volume_params) > 0: + request_spec = { + 'num_volumes': len(volume_params), + 'vsa_id': str(vsa_id), + 'volumes': volume_params, + } + + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_volumes", + "args": {"topic": FLAGS.volume_topic, + "request_spec": request_spec, + "availability_zone": availability_zone}}) + else: + # create BE volumes one-by-one + for vol in volume_params: + try: + vol_name = vol['name'] + vol_size = vol['size'] + vol_type_id = vol['volume_type_id'] + LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\ + "volume %(vol_name)s, %(vol_size)d GB, "\ + "type %(vol_type_id)s"), locals()) + + vol_type = volume_types.get_volume_type(context, + vol['volume_type_id']) + + vol_ref = self.volume_api.create(context, + vol_size, + None, + vol_name, + vol['description'], + volume_type=vol_type, + metadata=dict(to_vsa_id=str(vsa_id)), + availability_zone=availability_zone) + except: + self.update_vsa_status(context, vsa_id, + status=VsaState.PARTIAL) + raise + + if len(volume_params) == 0: + # No BE volumes - ask VSA manager to start VCs + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "create_vsa", + "args": {"vsa_id": str(vsa_id)}}) + + return vsa_ref + + def update_vsa_status(self, context, vsa_id, status): + updates = dict(status=status) + LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"), + locals()) + return self.update(context, vsa_id, **updates) + + def update(self, context, vsa_id, **kwargs): + """Updates the VSA instance in the datastore. 
+ + :param context: The security context + :param vsa_id: ID of the VSA instance to update + :param kwargs: All additional keyword args are treated + as data fields of the instance to be + updated + + :returns: None + """ + LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals()) + + updatable_fields = ['status', 'vc_count', 'vol_count', + 'display_name', 'display_description'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + + vc_count = kwargs.get('vc_count', None) + if vc_count is not None: + # VP-TODO: This request may want to update number of VCs + # Get number of current VCs and add/delete VCs appropriately + vsa = self.get(context, vsa_id) + vc_count = int(vc_count) + if vc_count > FLAGS.max_vcs_in_vsa: + LOG.warning(_("Requested number of VCs (%d) is too high."\ + " Setting to default"), vc_count) + vc_count = FLAGS.max_vcs_in_vsa + + if vsa['vc_count'] != vc_count: + self.update_num_vcs(context, vsa, vc_count) + changes['vc_count'] = vc_count + + return self.db.vsa_update(context, vsa_id, changes) + + def update_num_vcs(self, context, vsa, vc_count): + vsa_name = vsa['name'] + old_vc_count = int(vsa['vc_count']) + if vc_count > old_vc_count: + add_cnt = vc_count - old_vc_count + LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."), + locals()) + # VP-TODO: actual code for adding new VCs + + elif vc_count < old_vc_count: + del_cnt = old_vc_count - vc_count + LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."), + locals()) + # VP-TODO: actual code for deleting extra VCs + + def _force_volume_delete(self, ctxt, volume): + """Delete a volume, bypassing the check that it must be available.""" + host = volume['host'] + if not host: + # Deleting volume from database and skipping rpc. 
+ self.db.volume_destroy(ctxt, volume['id']) + return + + rpc.cast(ctxt, + self.db.queue_get_for(ctxt, FLAGS.volume_topic, host), + {"method": "delete_volume", + "args": {"volume_id": volume['id']}}) + + def delete_vsa_volumes(self, context, vsa_id, direction, + force_delete=True): + if direction == "FE": + volumes = self.get_all_vsa_volumes(context, vsa_id) + else: + volumes = self.get_all_vsa_drives(context, vsa_id) + + for volume in volumes: + try: + vol_name = volume['name'] + LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\ + "volume %(vol_name)s"), locals()) + self.volume_api.delete(context, volume['id']) + except exception.ApiError: + LOG.info(_("Unable to delete volume %s"), volume['name']) + if force_delete: + LOG.info(_("VSA ID %(vsa_id)s: Forced delete. "\ + "%(direction)s volume %(vol_name)s"), locals()) + self._force_volume_delete(context, volume) + + def delete(self, context, vsa_id): + """Terminate a VSA instance.""" + LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id) + + # Delete all FrontEnd and BackEnd volumes + self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True) + self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True) + + # Delete all VC instances + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) + for instance in instances: + name = instance['name'] + LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"), + locals()) + self.compute_api.delete(context, instance['id']) + + # Delete VSA instance + self.db.vsa_destroy(context, vsa_id) + + def get(self, context, vsa_id): + rv = self.db.vsa_get(context, vsa_id) + return rv + + def get_all(self, context): + if context.is_admin: + return self.db.vsa_get_all(context) + return self.db.vsa_get_all_by_project(context, context.project_id) + + def get_vsa_volume_type(self, context): + name = FLAGS.vsa_volume_type_name + try: + vol_type = volume_types.get_volume_type_by_name(context, name) + except 
exception.NotFound: + volume_types.create(context, name, + extra_specs=dict(type='vsa_volume')) + vol_type = volume_types.get_volume_type_by_name(context, name) + + return vol_type + + def get_all_vsa_instances(self, context, vsa_id): + return self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) + + def get_all_vsa_volumes(self, context, vsa_id): + return self.volume_api.get_all(context, + search_opts={'metadata': dict(from_vsa_id=str(vsa_id))}) + + def get_all_vsa_drives(self, context, vsa_id): + return self.volume_api.get_all(context, + search_opts={'metadata': dict(to_vsa_id=str(vsa_id))}) diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py new file mode 100644 index 000000000..8ac8a1dd5 --- /dev/null +++ b/nova/vsa/connection.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Abstraction of the underlying connection to VC.""" + +from nova.vsa import fake + + +def get_connection(): + # Return an object that is able to talk to VCs + return fake.FakeVcConnection() diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py new file mode 100644 index 000000000..d4248ca01 --- /dev/null +++ b/nova/vsa/fake.py @@ -0,0 +1,22 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class FakeVcConnection(object): + + def init_host(self, host): + pass diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py new file mode 100644 index 000000000..d4c414106 --- /dev/null +++ b/nova/vsa/manager.py @@ -0,0 +1,179 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all processes relating to Virtual Storage Arrays (VSA). 
+ +**Related Flags** + +""" + +from nova import compute +from nova import exception +from nova import flags +from nova import log as logging +from nova import manager +from nova import volume +from nova import vsa +from nova import utils +from nova.compute import instance_types +from nova.vsa import utils as vsa_utils +from nova.vsa.api import VsaState + +FLAGS = flags.FLAGS +flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection', + 'Driver to use for controlling VSAs') + +LOG = logging.getLogger('nova.vsa.manager') + + +class VsaManager(manager.SchedulerDependentManager): + """Manages Virtual Storage Arrays (VSAs).""" + + def __init__(self, vsa_driver=None, *args, **kwargs): + if not vsa_driver: + vsa_driver = FLAGS.vsa_driver + self.driver = utils.import_object(vsa_driver) + self.compute_manager = utils.import_object(FLAGS.compute_manager) + + self.compute_api = compute.API() + self.volume_api = volume.API() + self.vsa_api = vsa.API() + + if FLAGS.vsa_ec2_user_id is None or \ + FLAGS.vsa_ec2_access_key is None: + raise exception.VSANovaAccessParamNotFound() + + super(VsaManager, self).__init__(*args, **kwargs) + + def init_host(self): + self.driver.init_host(host=self.host) + super(VsaManager, self).init_host() + + @exception.wrap_exception() + def create_vsa(self, context, vsa_id): + """Called by API if there were no BE volumes assigned""" + LOG.debug(_("Create call received for VSA %s"), vsa_id) + + vsa_id = int(vsa_id) # just in case + + try: + vsa = self.vsa_api.get(context, vsa_id) + except Exception as ex: + msg = _("Failed to find VSA %(vsa_id)d") % locals() + LOG.exception(msg) + return + + return self._start_vcs(context, vsa) + + @exception.wrap_exception() + def vsa_volume_created(self, context, vol_id, vsa_id, status): + """Callback for volume creations""" + LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. 
"\ + "Status %(status)s"), locals()) + vsa_id = int(vsa_id) # just in case + + # Get all volumes for this VSA + # check if any of them still in creating phase + drives = self.vsa_api.get_all_vsa_drives(context, vsa_id) + for drive in drives: + if drive['status'] == 'creating': + vol_name = drive['name'] + vol_disp_name = drive['display_name'] + LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "\ + "in creating phase - wait"), locals()) + return + + try: + vsa = self.vsa_api.get(context, vsa_id) + except Exception as ex: + msg = _("Failed to find VSA %(vsa_id)d") % locals() + LOG.exception(msg) + return + + if len(drives) != vsa['vol_count']: + cvol_real = len(drives) + cvol_exp = vsa['vol_count'] + LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\ + "(%(cvol_real)d of %(cvol_exp)d)"), locals()) + return + + # all volumes created (successfully or not) + return self._start_vcs(context, vsa, drives) + + def _start_vcs(self, context, vsa, drives=[]): + """Start VCs for VSA """ + + vsa_id = vsa['id'] + if vsa['status'] == VsaState.CREATING: + self.vsa_api.update_vsa_status(context, vsa_id, + VsaState.LAUNCHING) + else: + return + + # in _separate_ loop go over all volumes and mark as "attached" + has_failed_volumes = False + for drive in drives: + vol_name = drive['name'] + vol_disp_name = drive['display_name'] + status = drive['status'] + LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\ + "(%(vol_disp_name)s) is in %(status)s state"), + locals()) + if status == 'available': + try: + # self.volume_api.update(context, volume['id'], + # dict(attach_status="attached")) + pass + except Exception as ex: + msg = _("Failed to update attach status for volume " + "%(vol_name)s. 
%(ex)s") % locals() + LOG.exception(msg) + else: + has_failed_volumes = True + + if has_failed_volumes: + LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) + self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True) + self.vsa_api.update_vsa_status(context, vsa_id, + VsaState.FAILED) + return + + # create user-data record for VC + storage_data = vsa_utils.generate_user_data(vsa, drives) + + instance_type = instance_types.get_instance_type( + vsa['instance_type_id']) + + # now start the VC instance + + vc_count = vsa['vc_count'] + LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"), + locals()) + vc_instances = self.compute_api.create(context, + instance_type, # vsa['vsa_instance_type'], + vsa['image_ref'], + min_count=1, + max_count=vc_count, + display_name='vc-' + vsa['display_name'], + display_description='VC for VSA ' + vsa['display_name'], + availability_zone=vsa['availability_zone'], + user_data=storage_data, + metadata=dict(vsa_id=str(vsa_id))) + + self.vsa_api.update_vsa_status(context, vsa_id, + VsaState.CREATED) diff --git a/nova/vsa/utils.py b/nova/vsa/utils.py new file mode 100644 index 000000000..1de341ac5 --- /dev/null +++ b/nova/vsa/utils.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import base64 +from xml.etree import ElementTree + +from nova import flags + +FLAGS = flags.FLAGS + + +def generate_user_data(vsa, volumes): + SubElement = ElementTree.SubElement + + e_vsa = ElementTree.Element("vsa") + + e_vsa_detail = SubElement(e_vsa, "id") + e_vsa_detail.text = str(vsa['id']) + e_vsa_detail = SubElement(e_vsa, "name") + e_vsa_detail.text = vsa['display_name'] + e_vsa_detail = SubElement(e_vsa, "description") + e_vsa_detail.text = vsa['display_description'] + e_vsa_detail = SubElement(e_vsa, "vc_count") + e_vsa_detail.text = str(vsa['vc_count']) + + e_vsa_detail = SubElement(e_vsa, "auth_user") + e_vsa_detail.text = FLAGS.vsa_ec2_user_id + e_vsa_detail = SubElement(e_vsa, "auth_access_key") + e_vsa_detail.text = FLAGS.vsa_ec2_access_key + + e_volumes = SubElement(e_vsa, "volumes") + for volume in volumes: + + loc = volume['provider_location'] + if loc is None: + ip = '' + iscsi_iqn = '' + iscsi_portal = '' + else: + (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ") + (ip, iscsi_portal) = iscsi_target.split(":", 1) + + e_vol = SubElement(e_volumes, "volume") + e_vol_detail = SubElement(e_vol, "id") + e_vol_detail.text = str(volume['id']) + e_vol_detail = SubElement(e_vol, "name") + e_vol_detail.text = volume['name'] + e_vol_detail = SubElement(e_vol, "display_name") + e_vol_detail.text = volume['display_name'] + e_vol_detail = SubElement(e_vol, "size_gb") + e_vol_detail.text = str(volume['size']) + e_vol_detail = SubElement(e_vol, "status") + e_vol_detail.text = volume['status'] + e_vol_detail = SubElement(e_vol, "ip") + e_vol_detail.text = ip + e_vol_detail = SubElement(e_vol, "iscsi_iqn") + e_vol_detail.text = iscsi_iqn + e_vol_detail = SubElement(e_vol, "iscsi_portal") + e_vol_detail.text = iscsi_portal + e_vol_detail = SubElement(e_vol, "lun") + e_vol_detail.text = '0' + e_vol_detail = SubElement(e_vol, "sn_host") + e_vol_detail.text = volume['host'] + + _xml = ElementTree.tostring(e_vsa) + return base64.b64encode(_xml) diff --git 
a/run_tests.sh b/run_tests.sh index 871332b4a..c1fda4cf9 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -eu + function usage { echo "Usage: $0 [OPTION]..." echo "Run Nova's test suite(s)" @@ -24,13 +26,13 @@ function usage { function process_option { case "$1" in -h|--help) usage;; - -V|--virtual-env) let always_venv=1; let never_venv=0;; - -N|--no-virtual-env) let always_venv=0; let never_venv=1;; - -r|--recreate-db) let recreate_db=1;; - -n|--no-recreate-db) let recreate_db=0;; - -f|--force) let force=1;; - -p|--pep8) let just_pep8=1;; - -c|--coverage) let coverage=1;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -p|--pep8) just_pep8=1;; + -c|--coverage) coverage=1;; -*) noseopts="$noseopts $1";; *) noseargs="$noseargs $1" esac @@ -130,7 +132,7 @@ if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi -run_tests || exit +run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to diff --git a/tools/pip-requires b/tools/pip-requires index 4949b66ca..a4af326dc 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -8,6 +8,7 @@ anyjson==0.2.4 boto==1.9b carrot==0.10.5 eventlet +kombu lockfile==0.8 lxml==2.3 python-novaclient==2.6.0 |
