From b814f9fef3efa1bdcb7e03a9161e08721b7bc8c4 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 15 Jul 2011 17:56:27 -0700 Subject: VSA: first cut. merged with 1279 --- Authors | 1 + bin/nova-api | 0 bin/nova-manage | 250 ++++++++++- bin/nova-vncproxy | 0 contrib/nova.sh | 0 nova/CA/newcerts/.placeholder | 0 nova/CA/private/.placeholder | 0 nova/CA/projects/.gitignore | 1 - nova/CA/projects/.placeholder | 0 nova/CA/reqs/.gitignore | 1 - nova/CA/reqs/.placeholder | 0 nova/api/ec2/__init__.py | 4 + nova/api/ec2/cloud.py | 164 ++++++- nova/api/openstack/contrib/drive_types.py | 147 ++++++ .../openstack/contrib/virtual_storage_arrays.py | 454 +++++++++++++++++++ nova/api/openstack/contrib/volumes.py | 14 +- nova/compute/api.py | 10 +- nova/db/api.py | 88 +++- nova/db/sqlalchemy/api.py | 291 ++++++++++++ .../migrate_repo/versions/032_add_vsa_data.py | 152 +++++++ nova/db/sqlalchemy/migration.py | 3 +- nova/db/sqlalchemy/models.py | 95 ++++ nova/exception.py | 20 + nova/flags.py | 27 ++ nova/quota.py | 4 +- nova/scheduler/vsa.py | 495 +++++++++++++++++++++ nova/tests/test_libvirt.py | 2 +- nova/volume/api.py | 46 +- nova/volume/driver.py | 20 +- nova/volume/manager.py | 121 ++++- nova/volume/san.py | 323 +++++++++++++- nova/vsa/__init__.py | 18 + nova/vsa/api.py | 407 +++++++++++++++++ nova/vsa/connection.py | 25 ++ nova/vsa/fake.py | 22 + nova/vsa/manager.py | 172 +++++++ plugins/xenserver/xenapi/etc/xapi.d/plugins/agent | 0 tools/clean-vlans | 0 tools/nova-debug | 0 39 files changed, 3328 insertions(+), 49 deletions(-) mode change 100755 => 100644 bin/nova-api mode change 100755 => 100644 bin/nova-vncproxy mode change 100755 => 100644 contrib/nova.sh delete mode 100644 nova/CA/newcerts/.placeholder delete mode 100644 nova/CA/private/.placeholder delete mode 100644 nova/CA/projects/.gitignore delete mode 100644 nova/CA/projects/.placeholder delete mode 100644 nova/CA/reqs/.gitignore delete mode 100644 nova/CA/reqs/.placeholder create mode 100644 nova/api/openstack/contrib/drive_types.py create mode 100644 nova/api/openstack/contrib/virtual_storage_arrays.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py create mode 100644 nova/scheduler/vsa.py create mode 100644 nova/vsa/__init__.py create mode 100644 nova/vsa/api.py create mode 100644 nova/vsa/connection.py create mode 100644 nova/vsa/fake.py create mode 100644 nova/vsa/manager.py mode change 100755 => 100644 plugins/xenserver/xenapi/etc/xapi.d/plugins/agent mode change 100755 => 100644 tools/clean-vlans mode change 100755 => 100644 tools/nova-debug diff --git a/Authors b/Authors index 8ffb7d8d4..d6dfe7615 100644 --- a/Authors +++ b/Authors @@ -95,6 +95,7 @@ Tushar Patil Vasiliy Shlykov Vishvananda Ishaya Vivek Y S +Vladimir Popovski William Wolf Yoshiaki Tamura Youcef Laribi diff --git a/bin/nova-api b/bin/nova-api old mode 100755 new mode 100644 diff --git a/bin/nova-manage b/bin/nova-manage index b892d958a..4cf27ec8c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -62,6 +62,10 @@ import sys import time +import tempfile +import zipfile +import ast + # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
 POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
@@ -84,6 +88,7 @@ from nova import rpc
 from nova import utils
 from nova import version
 from nova.api.ec2 import ec2utils
+from nova.api.ec2 import cloud
 from nova.auth import manager
 from nova.cloudpipe import pipelib
 from nova.compute import instance_types
@@ -870,6 +875,243 @@ class VersionCommands(object):
               (version.version_string(), version.version_string_with_vcs())
+
+class VsaCommands(object):
+    """Methods for dealing with VSAs"""
+
+    def __init__(self, *args, **kwargs):
+        self.controller = cloud.CloudController()
+        self.manager = manager.AuthManager()
+
+        # VP-TMP Solution for APIs. Project should be provided per API call
+        #self.context = context.get_admin_context()
+
+        try:
+            project = self.manager.get_projects().pop()
+        except IndexError:
+            print (_("No projects defined"))
+            raise
+
+        self.context = context.RequestContext(user=project.project_manager,
+                                              project=project)
+
+    def _list(self, vsas):
+        format_str = "%-5s %-15s %-25s %-30s %-5s %-10s %-10s %-10s %10s"
+        if len(vsas):
+            print format_str %\
+                (_('ID'),
+                 _('vsa_id'),
+                 _('displayName'),
+                 _('description'),
+                 _('count'),
+                 _('vc_type'),
+                 _('status'),
+                 _('AZ'),
+                 _('createTime'))
+
+        for vsa in vsas:
+            print format_str %\
+                (vsa['vsaId'],
+                 vsa['name'],
+                 vsa['displayName'],
+                 vsa['displayDescription'],
+                 vsa['vcCount'],
+                 vsa['vcType'],
+                 vsa['status'],
+                 vsa['availabilityZone'],
+                 str(vsa['createTime']))
+
+    def create(self, storage='[]', name=None, description=None, vc_count=1,
+               instance_type_name=None, image_name=None, shared=None,
+               az=None):
+        """Create a VSA.
+        args: [storage] [name] [description] [vc_count]
+              [instance_type] [image_name] [--shared|--full_drives]
+              [availability_zone]
+
+        where <storage> is a string representing a list of dictionaries
+        in the following format:
+        [{'drive_name': 'type', 'num_drives': N, 'size': M},..]
+        """
+
+        # Sanity check for storage string
+        storage_list = []
+        if storage is not None:
+            try:
+                storage_list = ast.literal_eval(storage)
+            except (SyntaxError, ValueError):
+                print _("Invalid string format %s") % storage
+                raise
+
+            for node in storage_list:
+                if ('drive_name' not in node) or ('num_drives' not in node):
+                    print _("Invalid string format for element %s. "
+                            "Expecting keys 'drive_name' & 'num_drives'")\
+                        % str(node)
+                    raise KeyError
+
+        if instance_type_name == '':
+            instance_type_name = None
+
+        if shared is None or shared == "--full_drives":
+            shared = False
+        elif shared == "--shared":
+            shared = True
+        else:
+            raise ValueError(_('Shared parameter should be set either to '
+                               '--shared or --full_drives'))
+
+        values = {
+            'display_name': name,
+            'display_description': description,
+            'vc_count': int(vc_count),
+            'vc_type': instance_type_name,
+            'image_name': image_name,
+            'storage': storage_list,
+            'shared': shared,
+            'placement': {'AvailabilityZone': az}
+        }
+
+        result = self.controller.create_vsa(self.context, **values)
+        self._list(result['vsaSet'])
+
+    def update(self, vsa_id, name=None, description=None, vc_count=None):
+        """Updates name/description of vsa and number of VCs
+        args: vsa_id [display_name] [display_description] [vc_count]"""
+
+        values = {}
+        if name is not None:
+            values['display_name'] = name
+        if description is not None:
+            values['display_description'] = description
+        if vc_count is not None:
+            values['vc_count'] = int(vc_count)
+
+        self.controller.update_vsa(self.context, vsa_id, **values)
+
+    def delete(self, vsa_id):
+        """Delete a vsa
+        args: vsa_id"""
+
+        self.controller.delete_vsa(self.context, vsa_id)
+
+    def list(self, vsa_id=None):
+        """Describe all available VSAs (or particular one)
+        args: [vsa_id]"""
+
+        if vsa_id is not None:
+            vsa_id = [vsa_id]
+
+        result = self.controller.describe_vsas(self.context, vsa_id)
+        self._list(result['vsaSet'])
+
+
+class VsaDriveTypeCommands(object):
+    """Methods for dealing with VSA drive types"""
+
+    def __init__(self, *args, **kwargs):
+        super(VsaDriveTypeCommands, self).__init__(*args, **kwargs)
+        # EC2-layer controller used by the create/update/rename/delete/list
+        # commands below.
+        self.controller = cloud.CloudController()
+
+    def _list(self, drives):
+        format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s"
+        if len(drives):
+            print format_str %\
+                (_('ID'),
+                 _('name'),
+                 _('type'),
+                 _('size_gb'),
+                 _('rpm'),
+                 _('capabilities'),
+                 _('visible'),
+                 _('createTime'))
+
+        for drive in drives:
+            print format_str %\
+                (str(drive['id']),
+                 drive['name'],
+                 drive['type'],
+                 str(drive['size_gb']),
+                 drive['rpm'],
+                 drive['capabilities'],
+                 str(drive['visible']),
+                 str(drive['created_at']))
+
+    def create(self, type, size_gb, rpm, capabilities='',
+               visible=None, name=None):
+        """Create drive type.
+ args: type size_gb rpm [capabilities] [--show|--hide] [custom_name] + """ + + if visible is None or visible == "--show": + visible = True + elif visible == "--hide": + visible = False + else: + raise ValueError(_('Visible parameter should be set to --show '\ + 'or --hide')) + + values = { + 'type': type, + 'size_gb': int(size_gb), + 'rpm': rpm, + 'capabilities': capabilities, + 'visible': visible, + 'name': name + } + result = self.controller.create_drive_type(context.get_admin_context(), + **values) + self._list(result['driveTypeSet']) + + def delete(self, name): + """Delete drive type + args: name""" + + self.controller.delete_drive_type(context.get_admin_context(), name) + + def rename(self, name, new_name=None): + """Rename drive type + args: name [new_name]""" + + self.controller.rename_drive_type(context.get_admin_context(), + name, new_name) + + def list(self, visible=None, name=None): + """Describe all available VSA drive types (or particular one) + args: [--all] [drive_name]""" + + visible = False if visible == "--all" else True + + if name is not None: + name = [name] + + result = self.controller.describe_drive_types( + context.get_admin_context(), name, visible) + self._list(result['driveTypeSet']) + + def update(self, name, type=None, size_gb=None, rpm=None, + capabilities='', visible=None): + """Update drive type. + args: name [type] [size_gb] [rpm] [capabilities] [--show|--hide] + """ + + if visible is None or visible == "--show": + visible = True + elif visible == "--hide": + visible = False + else: + raise ValueError(_('Visible parameter should be set to --show '\ + 'or --hide')) + + values = { + 'type': type, + 'size_gb': size_gb, + 'rpm': rpm, + 'capabilities': capabilities, + 'visible': visible + } + self.controller.update_drive_type(context.get_admin_context(), + name, **values) + + class VolumeCommands(object): """Methods for dealing with a cloud in an odd state""" @@ -1214,6 +1456,7 @@ CATEGORIES = [ ('agent', AgentBuildCommands), ('config', ConfigCommands), ('db', DbCommands), + ('drive', VsaDriveTypeCommands), ('fixed', FixedIpCommands), ('flavor', InstanceTypeCommands), ('floating', FloatingIpCommands), @@ -1229,7 +1472,8 @@ CATEGORIES = [ ('version', VersionCommands), ('vm', VmCommands), ('volume', VolumeCommands), - ('vpn', VpnCommands)] + ('vpn', VpnCommands), + ('vsa', VsaCommands)] def lazy_match(name, key_value_tuples): @@ -1295,6 +1539,10 @@ def main(): action, fn = matches[0] # call the action with the remaining arguments try: + for arg in sys.argv: + if arg == '-h' or arg == '--help': + print "%s %s: %s" % (category, action, fn.__doc__) + sys.exit(0) fn(*argv) sys.exit(0) except TypeError: diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy old mode 100755 new mode 100644 diff --git a/contrib/nova.sh b/contrib/nova.sh old mode 100755 new mode 100644 diff --git a/nova/CA/newcerts/.placeholder b/nova/CA/newcerts/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/CA/private/.placeholder b/nova/CA/private/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/CA/projects/.gitignore b/nova/CA/projects/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/nova/CA/projects/.gitignore +++ /dev/null @@ -1 +0,0 @@ -* diff --git a/nova/CA/projects/.placeholder b/nova/CA/projects/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/CA/reqs/.gitignore b/nova/CA/reqs/.gitignore deleted file mode 100644 index 72e8ffc0d..000000000 --- a/nova/CA/reqs/.gitignore +++ 
/dev/null @@ -1 +0,0 @@ -* diff --git a/nova/CA/reqs/.placeholder b/nova/CA/reqs/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 890d57fe7..ec44c02ef 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -269,6 +269,10 @@ class Authorizer(wsgi.Middleware): 'DescribeImageAttribute': ['all'], 'ModifyImageAttribute': ['projectmanager', 'sysadmin'], 'UpdateImage': ['projectmanager', 'sysadmin'], + 'CreateVsa': ['projectmanager', 'sysadmin'], + 'DeleteVsa': ['projectmanager', 'sysadmin'], + 'DescribeVsas': ['projectmanager', 'sysadmin'], + 'DescribeDriveTypes': ['projectmanager', 'sysadmin'], }, 'AdminController': { # All actions have the same permission: ['none'] (the default) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index acfd1361c..786ceaccc 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -42,6 +42,8 @@ from nova import network from nova import rpc from nova import utils from nova import volume +from nova import vsa +from nova.vsa import drive_types from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.image import s3 @@ -87,6 +89,7 @@ class CloudController(object): self.compute_api = compute.API( network_api=self.network_api, volume_api=self.volume_api) + self.vsa_api = vsa.API(compute_api=self.compute_api) self.setup() def __str__(self): @@ -727,12 +730,26 @@ class CloudController(object): snapshot_id = None LOG.audit(_("Create volume of %s GB"), size, context=context) + to_vsa_id = kwargs.get('to_vsa_id', None) + if to_vsa_id: + to_vsa_id = ec2utils.ec2_id_to_id(to_vsa_id) + + from_vsa_id = kwargs.get('from_vsa_id', None) + if from_vsa_id: + from_vsa_id = ec2utils.ec2_id_to_id(from_vsa_id) + + if to_vsa_id or from_vsa_id: + LOG.audit(_("Create volume of %s GB associated with VSA "\ + "(to: %d, from: %d)"), + size, to_vsa_id, from_vsa_id, context=context) + volume = self.volume_api.create( context, size=size, snapshot_id=snapshot_id, name=kwargs.get('display_name'), - description=kwargs.get('display_description')) + description=kwargs.get('display_description'), + to_vsa_id=to_vsa_id, from_vsa_id=from_vsa_id) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
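A caller-side sketch of the association path added above. This is illustrative only: a volume is tied to a VSA either as a backend drive created for it (to_vsa_id) or as a volume created from it (from_vsa_id), and the ids and display name below are invented.

    # A minimal sketch, assuming an admin context.
    from nova import context
    from nova import volume
    from nova.api.ec2 import ec2utils

    ctxt = context.get_admin_context()
    volume_api = volume.API()

    # EC2-style ids such as 'vsa-00000001' are converted to internal ids
    # before they reach the volume layer, as in create_volume() above.
    to_vsa_id = ec2utils.ec2_id_to_id('vsa-00000001')

    vol = volume_api.create(ctxt, size=10, snapshot_id=None,
                            name='vsa-be-drive', description=None,
                            to_vsa_id=to_vsa_id, from_vsa_id=None)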
@@ -786,6 +803,151 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} + def _format_vsa(self, context, p_vsa): + vsa = {} + vsa['vsaId'] = p_vsa['id'] + vsa['status'] = p_vsa['status'] + vsa['availabilityZone'] = p_vsa['availability_zone'] + vsa['createTime'] = p_vsa['created_at'] + vsa['name'] = p_vsa['name'] + vsa['displayName'] = p_vsa['display_name'] + vsa['displayDescription'] = p_vsa['display_description'] + vsa['vcCount'] = p_vsa['vc_count'] + if p_vsa['vsa_instance_type']: + vsa['vcType'] = p_vsa['vsa_instance_type'].get('name', None) + else: + vsa['vcType'] = None + return vsa + + def create_vsa(self, context, **kwargs): + display_name = kwargs.get('display_name') + display_description = kwargs.get('display_description') + vc_count = int(kwargs.get('vc_count', 1)) + instance_type = instance_types.get_instance_type_by_name( + kwargs.get('vc_type', FLAGS.default_vsa_instance_type)) + image_name = kwargs.get('image_name') + availability_zone = kwargs.get('placement', {}).get( + 'AvailabilityZone') + #storage = ast.literal_eval(kwargs.get('storage', '[]')) + storage = kwargs.get('storage', []) + shared = kwargs.get('shared', False) + + vc_type = instance_type['name'] + _storage = str(storage) + LOG.audit(_("Create VSA %(display_name)s vc_count:%(vc_count)d "\ + "vc_type:%(vc_type)s storage:%(_storage)s"), locals()) + + vsa = self.vsa_api.create(context, display_name, display_description, + vc_count, instance_type, image_name, + availability_zone, storage, shared) + return {'vsaSet': [self._format_vsa(context, vsa)]} + + def update_vsa(self, context, vsa_id, **kwargs): + LOG.audit(_("Update VSA %s"), vsa_id) + updatable_fields = ['display_name', 'display_description', 'vc_count'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + if changes: + vsa_id = ec2utils.ec2_id_to_id(vsa_id) + self.vsa_api.update(context, vsa_id=vsa_id, **changes) + return True + + def delete_vsa(self, context, vsa_id, **kwargs): + LOG.audit(_("Delete VSA %s"), vsa_id) + vsa_id = ec2utils.ec2_id_to_id(vsa_id) + + self.vsa_api.delete(context, vsa_id) + + return True + + def describe_vsas(self, context, vsa_id=None, status=None, + availability_zone=None, **kwargs): +# LOG.debug(_("vsa_id=%s, status=%s, az=%s"), +# (vsa_id, status, availability_zone)) + result = [] + vsas = [] + if vsa_id is not None: + for ec2_id in vsa_id: + internal_id = ec2utils.ec2_id_to_id(ec2_id) + vsa = self.vsa_api.get(context, internal_id) + vsas.append(vsa) + else: + vsas = self.vsa_api.get_all(context) + + if status: + result = [] + for vsa in vsas: + if vsa['status'] in status: + result.append(vsa) + vsas = result + + if availability_zone: + result = [] + for vsa in vsas: + if vsa['availability_zone'] in availability_zone: + result.append(vsa) + vsas = result + + return {'vsaSet': [self._format_vsa(context, vsa) for vsa in vsas]} + + def create_drive_type(self, context, **kwargs): + name = kwargs.get('name') + type = kwargs.get('type') + size_gb = int(kwargs.get('size_gb')) + rpm = kwargs.get('rpm') + capabilities = kwargs.get('capabilities') + visible = kwargs.get('visible', True) + + LOG.audit(_("Create Drive Type %(name)s: %(type)s %(size_gb)d "\ + "%(rpm)s %(capabilities)s %(visible)s"), + locals()) + + rv = drive_types.drive_type_create(context, type, size_gb, rpm, + capabilities, visible, name) + return {'driveTypeSet': [dict(rv)]} + + def update_drive_type(self, context, name, **kwargs): + 
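+        # Apply only the fields that were explicitly supplied and are
+        # non-empty; the record is looked up by name, and renames are
+        # handled separately by rename_drive_type() below.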
LOG.audit(_("Update Drive Type %s"), name) + updatable_fields = ['type', + 'size_gb', + 'rpm', + 'capabilities', + 'visible'] + changes = {} + for field in updatable_fields: + if field in kwargs and \ + kwargs[field] is not None and \ + kwargs[field] != '': + changes[field] = kwargs[field] + if changes: + drive_types.drive_type_update(context, name, **changes) + return True + + def rename_drive_type(self, context, name, new_name): + drive_types.drive_type_rename(context, name, new_name) + return True + + def delete_drive_type(self, context, name): + drive_types.drive_type_delete(context, name) + return True + + def describe_drive_types(self, context, names=None, visible=True): + + drives = [] + if names is not None: + for name in names: + drive = drive_types.drive_type_get_by_name(context, name) + if drive['visible'] == visible: + drives.append(drive) + else: + drives = drive_types.drive_type_get_all(context, visible) + + # VP-TODO: Change it later to EC2 compatible func (output) + + return {'driveTypeSet': [dict(drive) for drive in drives]} + def _convert_to_set(self, lst, label): if lst is None or lst == []: return None diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py new file mode 100644 index 000000000..85b3170cb --- /dev/null +++ b/nova/api/openstack/contrib/drive_types.py @@ -0,0 +1,147 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" The Drive Types extension for Virtual Storage Arrays""" + + +from webob import exc + +from nova.vsa import drive_types +from nova import db +from nova import quota +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.api.openstack import wsgi + +LOG = logging.getLogger("nova.api.drive_types") + + +class DriveTypeController(object): + """The Drive Type API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "drive_type": [ + "id", + "displayName", + "type", + "size", + "rpm", + "capabilities", + ]}}} + + def _drive_type_view(self, context, drive): + """Maps keys for drive types view.""" + d = {} + + d['id'] = drive['id'] + d['displayName'] = drive['name'] + d['type'] = drive['type'] + d['size'] = drive['size_gb'] + d['rpm'] = drive['rpm'] + d['capabilities'] = drive['capabilities'] + return d + + def index(self, req): + """Returns a list of drive types.""" + + context = req.environ['nova.context'] + drive_types = drive_types.drive_type_get_all(context) + limited_list = common.limited(drive_types, req) + res = [self._drive_type_view(context, drive) for drive in limited_list] + return {'drive_types': res} + + def show(self, req, id): + """Return data about the given drive type.""" + context = req.environ['nova.context'] + + try: + drive = drive_types.drive_type_get(context, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + return {'drive_type': self._drive_type_view(context, drive)} + + def create(self, req, body): + """Creates a new drive type.""" + context = req.environ['nova.context'] + + if not body: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + drive = body['drive_type'] + + name = drive.get('displayName') + type = drive.get('type') + size = drive.get('size') + rpm = drive.get('rpm') + capabilities = drive.get('capabilities') + + LOG.audit(_("Create drive type %(name)s for "\ + "%(type)s:%(size)s:%(rpm)s"), locals(), context=context) + + new_drive = drive_types.drive_type_create(context, + type=type, + size_gb=size, + rpm=rpm, + capabilities=capabilities, + name=name) + + return {'drive_type': self._drive_type_view(context, new_drive)} + + def delete(self, req, id): + """Deletes a drive type.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete drive type with id: %s"), id, context=context) + + try: + drive = drive_types.drive_type_get(context, id) + drive_types.drive_type_delete(context, drive['name']) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + + +class Drive_types(extensions.ExtensionDescriptor): + + def get_name(self): + return "DriveTypes" + + def get_alias(self): + return "zadr-drive_types" + + def get_description(self): + return "Drive Types support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/drive_types/api/v1.1" + + def get_updated(self): + return "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'zadr-drive_types', + DriveTypeController()) + + resources.append(res) + return resources diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py new file mode 100644 index 000000000..eca2d68dd --- /dev/null +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -0,0 +1,454 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 
2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" The virtul storage array extension""" + + +from webob import exc + +from nova import vsa +from nova import volume +from nova import db +from nova import quota +from nova import exception +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.api.openstack import wsgi +from nova.api.openstack.contrib import volumes +from nova.compute import instance_types + +from nova import flags +FLAGS = flags.FLAGS + +LOG = logging.getLogger("nova.api.vsa") + + +class VsaController(object): + """The Virtual Storage Array API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "vsa": [ + "id", + "name", + "displayName", + "displayDescription", + "createTime", + "status", + "vcType", + "vcCount", + "driveCount", + ]}}} + + def __init__(self): + self.vsa_api = vsa.API() + super(VsaController, self).__init__() + + def _vsa_view(self, context, vsa, details=False): + """Map keys for vsa summary/detailed view.""" + d = {} + + d['id'] = vsa['id'] + d['name'] = vsa['name'] + d['displayName'] = vsa['display_name'] + d['displayDescription'] = vsa['display_description'] + + d['createTime'] = vsa['created_at'] + d['status'] = vsa['status'] + + if vsa['vsa_instance_type']: + d['vcType'] = vsa['vsa_instance_type'].get('name', None) + else: + d['vcType'] = None + + d['vcCount'] = vsa['vc_count'] + d['driveCount'] = vsa['vol_count'] + + return d + + def _items(self, req, details): + """Return summary or detailed list of VSAs.""" + context = req.environ['nova.context'] + vsas = self.vsa_api.get_all(context) + limited_list = common.limited(vsas, req) + res = [self._vsa_view(context, vsa, details) for vsa in limited_list] + return {'vsaSet': res} + + def index(self, req): + """Return a short list of VSAs.""" + return self._items(req, details=False) + + def detail(self, req): + """Return a detailed list of VSAs.""" + return self._items(req, details=True) + + def show(self, req, id): + """Return data about the given VSA.""" + context = req.environ['nova.context'] + + try: + vsa = self.vsa_api.get(context, vsa_id=id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + return {'vsa': self._vsa_view(context, vsa, details=True)} + + def create(self, req, body): + """Create a new VSA.""" + context = req.environ['nova.context'] + + if not body: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + vsa = body['vsa'] + + display_name = vsa.get('displayName') + display_description = vsa.get('displayDescription') + storage = vsa.get('storage') + shared = vsa.get('shared') + vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type) + availability_zone = vsa.get('placement', {}).get('AvailabilityZone') + + try: + instance_type = instance_types.get_instance_type_by_name(vc_type) + except exception.NotFound: + return 
faults.Fault(exc.HTTPNotFound()) + + LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"), + locals(), context=context) + + result = self.vsa_api.create(context, + display_name=display_name, + display_description=display_description, + storage=storage, + shared=shared, + instance_type=instance_type, + availability_zone=availability_zone) + + return {'vsa': self._vsa_view(context, result, details=True)} + + def delete(self, req, id): + """Delete a VSA.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete VSA with id: %s"), id, context=context) + + try: + self.vsa_api.delete(context, vsa_id=id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + + +class VsaVolumeDriveController(volumes.VolumeController): + """The base class for VSA volumes & drives. + + A child resource of the VSA object. Allows operations with + volumes and drives created to/from particular VSA + + """ + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "volume": [ + "id", + "name", + "status", + "size", + "availabilityZone", + "createdAt", + "displayName", + "displayDescription", + "vsaId", + ]}}} + + def __init__(self): + # self.compute_api = compute.API() + # self.vsa_api = vsa.API() + self.volume_api = volume.API() + super(VsaVolumeDriveController, self).__init__() + + def _translation(self, context, vol, vsa_id, details): + if details: + translation = volumes.translate_volume_detail_view + else: + translation = volumes.translate_volume_summary_view + + d = translation(context, vol) + d['vsaId'] = vol[self.direction] + return d + + def _check_volume_ownership(self, context, vsa_id, id): + obj = self.object + try: + volume_ref = self.volume_api.get(context, volume_id=id) + except exception.NotFound: + LOG.error(_("%(obj)s with ID %(id)s not found"), locals()) + raise + + own_vsa_id = volume_ref[self.direction] + if own_vsa_id != int(vsa_id): + LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\ + " and not to VSA %(vsa_id)s."), locals()) + raise exception.Invalid() + + def _items(self, req, vsa_id, details): + """Return summary or detailed list of volumes for particular VSA.""" + context = req.environ['nova.context'] + + vols = self.volume_api.get_all_by_vsa(context, vsa_id, + self.direction.split('_')[0]) + limited_list = common.limited(vols, req) + + res = [self._translation(context, vol, vsa_id, details) \ + for vol in limited_list] + + return {self.objects: res} + + def index(self, req, vsa_id): + """Return a short list of volumes created from particular VSA.""" + LOG.audit(_("Index. vsa_id=%(vsa_id)s"), locals()) + return self._items(req, vsa_id, details=False) + + def detail(self, req, vsa_id): + """Return a detailed list of volumes created from particular VSA.""" + LOG.audit(_("Detail. vsa_id=%(vsa_id)s"), locals()) + return self._items(req, vsa_id, details=True) + + def create(self, req, vsa_id, body): + """Create a new volume from VSA.""" + LOG.audit(_("Create. 
vsa_id=%(vsa_id)s, body=%(body)s"), locals()) + context = req.environ['nova.context'] + + if not body: + return faults.Fault(exc.HTTPUnprocessableEntity()) + + vol = body[self.object] + size = vol['size'] + LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"), + locals(), context=context) + + new_volume = self.volume_api.create(context, size, None, + vol.get('displayName'), + vol.get('displayDescription'), + from_vsa_id=vsa_id) + + return {self.object: self._translation(context, new_volume, + vsa_id, True)} + + def update(self, req, vsa_id, id, body): + """Update a volume.""" + context = req.environ['nova.context'] + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + vol = body[self.object] + updatable_fields = ['display_name', + 'display_description', + 'status', + 'provider_location', + 'provider_auth'] + changes = {} + for field in updatable_fields: + if field in vol: + changes[field] = vol[field] + + obj = self.object + LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"), + locals(), context=context) + + try: + self.volume_api.update(context, volume_id=id, fields=changes) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + + def delete(self, req, vsa_id, id): + """Delete a volume.""" + context = req.environ['nova.context'] + + LOG.audit(_("Delete. vsa_id=%(vsa_id)s, id=%(id)s"), locals()) + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + return super(VsaVolumeDriveController, self).delete(req, id) + + def show(self, req, vsa_id, id): + """Return data about the given volume.""" + context = req.environ['nova.context'] + + LOG.audit(_("Show. vsa_id=%(vsa_id)s, id=%(id)s"), locals()) + + try: + self._check_volume_ownership(context, vsa_id, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + except exception.Invalid: + return faults.Fault(exc.HTTPBadRequest()) + + return super(VsaVolumeDriveController, self).show(req, id) + + +class VsaVolumeController(VsaVolumeDriveController): + """The VSA volume API controller for the Openstack API. + + A child resource of the VSA object. Allows operations with volumes created + by particular VSA + + """ + + def __init__(self): + self.direction = 'from_vsa_id' + self.objects = 'volumes' + self.object = 'volume' + super(VsaVolumeController, self).__init__() + + +class VsaDriveController(VsaVolumeDriveController): + """The VSA Drive API controller for the Openstack API. + + A child resource of the VSA object. Allows operations with drives created + for particular VSA + + """ + + def __init__(self): + self.direction = 'to_vsa_id' + self.objects = 'drives' + self.object = 'drive' + super(VsaDriveController, self).__init__() + + def create(self, req, vsa_id, body): + """Create a new drive for VSA. Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update a drive. 
Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + + +class VsaVPoolController(object): + """The vPool VSA API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "vpool": [ + "id", + "vsaId", + "name", + "displayName", + "displayDescription", + "driveCount", + "driveIds", + "protection", + "stripeSize", + "stripeWidth", + "createTime", + "status", + ]}}} + + def __init__(self): + self.vsa_api = vsa.API() + super(VsaVPoolController, self).__init__() + + def index(self, req, vsa_id): + """Return a short list of vpools created from particular VSA.""" + return {'vpools': []} + + def create(self, req, vsa_id, body): + """Create a new vPool for VSA.""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update vPool parameters.""" + return faults.Fault(exc.HTTPBadRequest()) + + def delete(self, req, vsa_id, id): + """Delete a vPool.""" + return faults.Fault(exc.HTTPBadRequest()) + + def show(self, req, vsa_id, id): + """Return data about the given vPool.""" + return faults.Fault(exc.HTTPBadRequest()) + + +class Virtual_storage_arrays(extensions.ExtensionDescriptor): + + def get_name(self): + return "VSAs" + + def get_alias(self): + return "zadr-vsa" + + def get_description(self): + return "Virtual Storage Arrays support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/vsa/api/v1.1" + + def get_updated(self): + return "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'zadr-vsa', + VsaController(), + collection_actions={'detail': 'GET'}, + member_actions={'add_capacity': 'POST', + 'remove_capacity': 'POST'}) + resources.append(res) + + res = extensions.ResourceExtension('volumes', + VsaVolumeController(), + collection_actions={'detail': 'GET'}, + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + res = extensions.ResourceExtension('drives', + VsaDriveController(), + collection_actions={'detail': 'GET'}, + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + res = extensions.ResourceExtension('vpools', + VsaVPoolController(), + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + + return resources diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index e5e2c5b50..3c3d40c0f 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -33,17 +33,17 @@ LOG = logging.getLogger("nova.api.volumes") FLAGS = flags.FLAGS -def _translate_volume_detail_view(context, vol): +def translate_volume_detail_view(context, vol): """Maps keys for volumes details view.""" - d = _translate_volume_summary_view(context, vol) + d = translate_volume_summary_view(context, vol) # No additional data / lookups at the moment return d -def _translate_volume_summary_view(context, vol): +def translate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} @@ -92,7 +92,7 @@ class VolumeController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'volume': _translate_volume_detail_view(context, vol)} + return {'volume': translate_volume_detail_view(context, vol)} def delete(self, req, id): """Delete a volume.""" @@ -108,11 +108,11 @@ class VolumeController(object): def index(self, req): """Returns a summary list of volumes.""" - return self._items(req, 
entity_maker=_translate_volume_summary_view) + return self._items(req, entity_maker=translate_volume_summary_view) def detail(self, req): """Returns a detailed list of volumes.""" - return self._items(req, entity_maker=_translate_volume_detail_view) + return self._items(req, entity_maker=translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" @@ -140,7 +140,7 @@ class VolumeController(object): # Work around problem that instance is lazy-loaded... new_volume['instance'] = None - retval = _translate_volume_detail_view(context, new_volume) + retval = translate_volume_detail_view(context, new_volume) return {'volume': retval} diff --git a/nova/compute/api.py b/nova/compute/api.py index 432658bbb..a48a5bc98 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -149,7 +149,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None): + reservation_id=None, vsa_id=None): """Verify all the input parameters regardless of the provisioning strategy being performed.""" @@ -241,7 +241,8 @@ class API(base.Base): 'availability_zone': availability_zone, 'os_type': os_type, 'architecture': architecture, - 'vm_mode': vm_mode} + 'vm_mode': vm_mode, + 'vsa_id': vsa_id} return (num_instances, base_options) @@ -381,7 +382,8 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None): + reservation_id=None, block_device_mapping=None, + vsa_id=None): """ Provision the instances by sending off a series of single instance requests to the Schedulers. 
This is fine for trival @@ -402,7 +404,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id) + reservation_id, vsa_id) instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) diff --git a/nova/db/api.py b/nova/db/api.py index b7c5700e5..9147f136b 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -49,7 +49,8 @@ flags.DEFINE_string('volume_name_template', 'volume-%08x', 'Template string to be used to generate instance names') flags.DEFINE_string('snapshot_name_template', 'snapshot-%08x', 'Template string to be used to generate snapshot names') - +flags.DEFINE_string('vsa_name_template', 'vsa-%08x', + 'Template string to be used to generate VSA names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') @@ -509,6 +510,13 @@ def instance_get_all_by_project(context, project_id): return IMPL.instance_get_all_by_project(context, project_id) +def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): + """Get all instance spawned by a given VSA belonging to a project.""" + return IMPL.instance_get_all_by_project_and_vsa(context, + project_id, + vsa_id) + + def instance_get_all_by_host(context, host): """Get all instance belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) @@ -914,6 +922,16 @@ def volume_get_all_by_project(context, project_id): return IMPL.volume_get_all_by_project(context, project_id) +def volume_get_all_assigned_to_vsa(context, vsa_id): + """Get all volumes assigned to particular VSA.""" + return IMPL.volume_get_all_assigned_to_vsa(context, vsa_id) + + +def volume_get_all_assigned_from_vsa(context, vsa_id): + """Get all volumes created from particular VSA.""" + return IMPL.volume_get_all_assigned_from_vsa(context, vsa_id) + + def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) @@ -1422,3 +1440,71 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, key/value pairs specified in the extra specs dict argument""" IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, extra_specs) + + +#################### + + +def drive_type_create(context, values): + """Creates drive type record.""" + return IMPL.drive_type_create(context, values) + + +def drive_type_update(context, name, values): + """Updates drive type record.""" + return IMPL.drive_type_update(context, name, values) + + +def drive_type_destroy(context, name): + """Deletes drive type record.""" + return IMPL.drive_type_destroy(context, name) + + +def drive_type_get(context, drive_type_id): + """Get drive type record by id.""" + return IMPL.drive_type_get(context, drive_type_id) + + +def drive_type_get_by_name(context, name): + """Get drive type record by name.""" + return IMPL.drive_type_get_by_name(context, name) + + +def drive_type_get_all(context, visible=None): + """Returns all (or only visible) drive types.""" + return IMPL.drive_type_get_all(context, visible) + + +def vsa_create(context, values): + """Creates Virtual Storage Array record.""" + return IMPL.vsa_create(context, values) + + +def vsa_update(context, vsa_id, values): + """Updates Virtual Storage Array record.""" + return IMPL.vsa_update(context, vsa_id, values) + + +def vsa_destroy(context, vsa_id): + """Deletes Virtual Storage Array record.""" + return IMPL.vsa_destroy(context, vsa_id) + + +def vsa_get(context, vsa_id): + """Get Virtual Storage Array 
record by ID.""" + return IMPL.vsa_get(context, vsa_id) + + +def vsa_get_all(context): + """Get all Virtual Storage Array records.""" + return IMPL.vsa_get_all(context) + + +def vsa_get_all_by_project(context, project_id): + """Get all Virtual Storage Array records by project ID.""" + return IMPL.vsa_get_all_by_project(context, project_id) + + +def vsa_get_vc_ips_list(context, vsa_id): + """Retrieves IPs of instances associated with Virtual Storage Array.""" + return IMPL.vsa_get_vc_ips_list(context, vsa_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a831516a8..aa5a6e052 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1217,6 +1217,35 @@ def instance_get_all_by_project(context, project_id): all() +@require_context +def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter_by(project_id=project_id).\ + filter_by(vsa_id=vsa_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_admin_context +def instance_get_all_by_vsa(context, vsa_id): + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter_by(vsa_id=vsa_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + @require_context def instance_get_all_by_reservation(context, reservation_id): session = get_session() @@ -2018,12 +2047,14 @@ def volume_get(context, volume_id, session=None): if is_admin_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=volume_id).\ filter_by(deleted=False).\ @@ -2039,6 +2070,7 @@ def volume_get_all(context): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2048,6 +2080,7 @@ def volume_get_all_by_host(context, host): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2057,6 +2090,7 @@ def volume_get_all_by_host(context, host): def volume_get_all_by_instance(context, instance_id): session = get_session() result = session.query(models.Volume).\ + options(joinedload('drive_type')).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() @@ -2065,6 +2099,28 @@ def volume_get_all_by_instance(context, instance_id): return result +@require_admin_context +def volume_get_all_assigned_to_vsa(context, vsa_id): + session = get_session() + result = session.query(models.Volume).\ + options(joinedload('drive_type')).\ + filter_by(to_vsa_id=vsa_id).\ + filter_by(deleted=False).\ + all() + return result + + +@require_admin_context +def 
volume_get_all_assigned_from_vsa(context, vsa_id): + session = get_session() + result = session.query(models.Volume).\ + options(joinedload('drive_type')).\ + filter_by(from_vsa_id=vsa_id).\ + filter_by(deleted=False).\ + all() + return result + + @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) @@ -2072,6 +2128,7 @@ def volume_get_all_by_project(context, project_id): session = get_session() return session.query(models.Volume).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2084,6 +2141,7 @@ def volume_get_instance(context, volume_id): filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ options(joinedload('instance')).\ + options(joinedload('drive_type')).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) @@ -3286,3 +3344,236 @@ def instance_type_extra_specs_update_or_create(context, instance_type_id, "deleted": 0}) spec_ref.save(session=session) return specs + + + #################### + + +@require_admin_context +def drive_type_create(context, values): + """ + Creates drive type record. + """ + try: + drive_type_ref = models.DriveTypes() + drive_type_ref.update(values) + drive_type_ref.save() + except Exception, e: + raise exception.DBError(e) + return drive_type_ref + + +@require_admin_context +def drive_type_update(context, name, values): + """ + Updates drive type record. + """ + session = get_session() + with session.begin(): + drive_type_ref = drive_type_get_by_name(context, name, session=session) + drive_type_ref.update(values) + drive_type_ref.save(session=session) + return drive_type_ref + + +@require_admin_context +def drive_type_destroy(context, name): + """ + Deletes drive type record. + """ + session = get_session() + drive_type_ref = session.query(models.DriveTypes).\ + filter_by(name=name) + records = drive_type_ref.delete() + if records == 0: + raise exception.VirtualDiskTypeNotFoundByName(name=name) + else: + return drive_type_ref + + +@require_context +def drive_type_get(context, drive_type_id, session=None): + """ + Get drive type record by id. + """ + if not session: + session = get_session() + + result = session.query(models.DriveTypes).\ + filter_by(id=drive_type_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + if not result: + raise exception.VirtualDiskTypeNotFound(id=drive_type_id) + + return result + + +@require_context +def drive_type_get_by_name(context, name, session=None): + """ + Get drive type record by name. + """ + if not session: + session = get_session() + + result = session.query(models.DriveTypes).\ + filter_by(name=name).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + if not result: + raise exception.VirtualDiskTypeNotFoundByName(name=name) + + return result + + +@require_context +def drive_type_get_all(context, visible=False): + """ + Returns all (or only visible) drive types. + """ + session = get_session() + if not visible: + drive_types = session.query(models.DriveTypes).\ + filter_by(deleted=can_read_deleted(context)).\ + order_by("name").\ + all() + else: + drive_types = session.query(models.DriveTypes).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(visible=True).\ + order_by("name").\ + all() + return drive_types + + + #################### + + +@require_admin_context +def vsa_create(context, values): + """ + Creates Virtual Storage Array record. 
+ """ + try: + vsa_ref = models.VirtualStorageArray() + vsa_ref.update(values) + vsa_ref.save() + except Exception, e: + raise exception.DBError(e) + return vsa_ref + + +@require_admin_context +def vsa_update(context, vsa_id, values): + """ + Updates Virtual Storage Array record. + """ + session = get_session() + with session.begin(): + vsa_ref = vsa_get(context, vsa_id, session=session) + vsa_ref.update(values) + vsa_ref.save(session=session) + return vsa_ref + + +@require_admin_context +def vsa_destroy(context, vsa_id): + """ + Deletes Virtual Storage Array record. + """ + session = get_session() + with session.begin(): + #vsa_ref = vsa_get(context, vsa_id, session=session) + #vsa_ref.delete(session=session) + session.query(models.VirtualStorageArray).\ + filter_by(id=vsa_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def vsa_get(context, vsa_id, session=None): + """ + Get Virtual Storage Array record by ID. + """ + if not session: + session = get_session() + result = None + + if is_admin_context(context): + result = session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(id=vsa_id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(project_id=context.project_id).\ + filter_by(id=vsa_id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.VirtualStorageArrayNotFound(id=vsa_id) + + return result + + +@require_admin_context +def vsa_get_all(context): + """ + Get all Virtual Storage Array records. + """ + session = get_session() + return session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def vsa_get_all_by_project(context, project_id): + """ + Get all Virtual Storage Array records by project ID. + """ + authorize_project_context(context, project_id) + + session = get_session() + return session.query(models.VirtualStorageArray).\ + options(joinedload('vsa_instance_type')).\ + filter_by(project_id=project_id).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_context +def vsa_get_vc_ips_list(context, vsa_id): + """ + Retrieves IPs of instances associated with Virtual Storage Array. 
+ """ + result = [] + session = get_session() + vc_instances = session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter_by(vsa_id=vsa_id).\ + filter_by(deleted=False).\ + all() + for vc_instance in vc_instances: + if vc_instance['fixed_ips']: + for fixed in vc_instance['fixed_ips']: + # insert the [floating,fixed] (if exists) in the head, + # otherwise append the [none,fixed] in the tail + ip = {} + ip['fixed'] = fixed['address'] + if fixed['floating_ips']: + ip['floating'] = fixed['floating_ips'][0]['address'] + result.append(ip) + + return result + + #################### diff --git a/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py new file mode 100644 index 000000000..7fc8f955c --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/032_add_vsa_data.py @@ -0,0 +1,152 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Text, Boolean, ForeignKey + +from nova import log as logging + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of tables . 
+# + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +vsa_id = Column('vsa_id', Integer(), nullable=True) +to_vsa_id = Column('to_vsa_id', Integer(), nullable=True) +from_vsa_id = Column('from_vsa_id', Integer(), nullable=True) +drive_type_id = Column('drive_type_id', Integer(), nullable=True) + + +# New Tables +# + +virtual_storage_arrays = Table('virtual_storage_arrays', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type_id', Integer(), nullable=False), + Column('image_ref', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vc_count', Integer(), nullable=False), + Column('vol_count', Integer(), nullable=False), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + +drive_types = Table('drive_types', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('type', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size_gb', Integer(), nullable=False), + Column('rpm', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('capabilities', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('visible', Boolean(create_constraint=True, name=None)), + ) + +#vsa_disk_association = Table('vsa_disk_association', meta, +# Column('created_at', DateTime(timezone=False)), +# Column('updated_at', DateTime(timezone=False)), +# Column('deleted_at', DateTime(timezone=False)), +# Column('deleted', Boolean(create_constraint=True, name=None)), +# Column('id', Integer(), primary_key=True, nullable=False), +# Column('drive_type_id', Integer(), ForeignKey('drive_types.id')), +# Column('vsa_id', Integer(), ForeignKey('virtual_storage_arrays.id')), +# Column('disk_num', Integer(), nullable=False), +# ) + +#new_tables = (virtual_storage_arrays, drive_types, vsa_disk_association) +new_tables = (virtual_storage_arrays, drive_types) + +# +# Tables to alter +# + + +def upgrade(migrate_engine): + + from 
nova import context + from nova import db + from nova import flags + + FLAGS = flags.FLAGS + + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + for table in new_tables: + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + instances.create_column(vsa_id) + volumes.create_column(to_vsa_id) + volumes.create_column(from_vsa_id) + volumes.create_column(drive_type_id) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances.drop_column(vsa_id) + volumes.drop_column(to_vsa_id) + volumes.drop_column(from_vsa_id) + volumes.drop_column(drive_type_id) + + for table in new_tables: + table.drop() diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py index d9e303599..9b64671a3 100644 --- a/nova/db/sqlalchemy/migration.py +++ b/nova/db/sqlalchemy/migration.py @@ -64,7 +64,8 @@ def db_version(): 'users', 'user_project_association', 'user_project_role_association', 'user_role_association', - 'volumes'): + 'volumes', + 'virtual_storage_arrays', 'drive_types'): assert table in meta.tables return db_version_control(1) except AssertionError: diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index d29d3d6f1..7f2e9d39c 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -247,6 +247,43 @@ class Instance(BASE, NovaBase): # assert(state in ['nostate', 'running', 'blocked', 'paused', # 'shutdown', 'shutoff', 'crashed']) + vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id'), + nullable=True) + + +class VirtualStorageArray(BASE, NovaBase): + """ + Represents a virtual storage array supplying block storage to instances. 
+ """ + __tablename__ = 'virtual_storage_arrays' + + id = Column(Integer, primary_key=True, autoincrement=True) + + @property + def name(self): + return FLAGS.vsa_name_template % self.id + + # User editable field for display in user-facing UIs + display_name = Column(String(255)) + display_description = Column(String(255)) + + project_id = Column(String(255)) + availability_zone = Column(String(255)) + + instance_type_id = Column(Integer, ForeignKey('instance_types.id')) + image_ref = Column(String(255)) + vc_count = Column(Integer, default=0) # number of requested VC instances + vol_count = Column(Integer, default=0) # total number of BE volumes + status = Column(String(255)) + + #admin_pass = Column(String(255)) + + #disks = relationship(VsaDiskAssociation, + # backref=backref('vsa', uselist=False), + # foreign_keys=id, + # primaryjoin='and_(VsaDiskAssociation.vsa_id == ' + # 'VirtualStorageArray.id)') + class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" @@ -277,6 +314,12 @@ class InstanceTypes(BASE, NovaBase): primaryjoin='and_(Instance.instance_type_id == ' 'InstanceTypes.id)') + vsas = relationship(VirtualStorageArray, + backref=backref('vsa_instance_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(VirtualStorageArray.instance_type_id' + ' == InstanceTypes.id)') + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" @@ -316,6 +359,57 @@ class Volume(BASE, NovaBase): provider_location = Column(String(255)) provider_auth = Column(String(255)) + to_vsa_id = Column(Integer, + ForeignKey('virtual_storage_arrays.id'), nullable=True) + from_vsa_id = Column(Integer, + ForeignKey('virtual_storage_arrays.id'), nullable=True) + drive_type_id = Column(Integer, + ForeignKey('drive_types.id'), nullable=True) + + +class DriveTypes(BASE, NovaBase): + """Represents the known drive types (storage media).""" + __tablename__ = 'drive_types' + + id = Column(Integer, primary_key=True, autoincrement=True) + + """ + @property + def name(self): + if self.capabilities: + return FLAGS.drive_type_template_long % \ + (self.type, str(self.size_gb), self.rpm, self.capabilities) + else: + return FLAGS.drive_type_template_short % \ + (self.type, str(self.size_gb), self.rpm) + """ + + name = Column(String(255), unique=True) + type = Column(String(255)) + size_gb = Column(Integer) + rpm = Column(String(255)) + capabilities = Column(String(255)) + + visible = Column(Boolean, default=True) + + volumes = relationship(Volume, + backref=backref('drive_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(Volume.drive_type_id == ' + 'DriveTypes.id)') + +# +#class VsaDiskAssociation(BASE, NovaBase): +# """associates drive types with Virtual Storage Arrays.""" +# __tablename__ = 'vsa_disk_association' +# +# id = Column(Integer, primary_key=True, autoincrement=True) +# +# drive_type_id = Column(Integer, ForeignKey('drive_types.id')) +# vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id')) +# +# disk_num = Column(Integer, nullable=False) # number of disks + class Quota(BASE, NovaBase): """Represents a single quota override for a project. 
@@ -785,6 +879,7 @@ def register_models():
               Network, SecurityGroup, SecurityGroupIngressRule,
               SecurityGroupInstanceAssociation, AuthToken, User, Project,
               Certificate, ConsolePool, Console, Zone,
+              VirtualStorageArray, DriveTypes,
               AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
     engine = create_engine(FLAGS.sql_connection, echo=False)
     for model in models:
diff --git a/nova/exception.py b/nova/exception.py
index ad6c005f8..a3d1a4b3f 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -311,6 +311,10 @@ class VolumeNotFoundForInstance(VolumeNotFound):
     message = _("Volume not found for instance %(instance_id)s.")


+class VolumeNotFoundForVsa(VolumeNotFound):
+    message = _("Volume not found for vsa %(vsa_id)s.")
+
+
 class SnapshotNotFound(NotFound):
     message = _("Snapshot %(snapshot_id)s could not be found.")

@@ -682,3 +686,19 @@ class PasteConfigNotFound(NotFound):

 class PasteAppNotFound(NotFound):
     message = _("Could not load paste app '%(name)s' from %(path)s")
+
+
+class VirtualStorageArrayNotFound(NotFound):
+    message = _("Virtual Storage Array %(id)d could not be found.")
+
+
+class VirtualStorageArrayNotFoundByName(NotFound):
+    message = _("Virtual Storage Array %(name)s could not be found.")
+
+
+class VirtualDiskTypeNotFound(NotFound):
+    message = _("Drive Type %(id)d could not be found.")
+
+
+class VirtualDiskTypeNotFoundByName(NotFound):
+    message = _("Drive Type %(name)s could not be found.")
diff --git a/nova/flags.py b/nova/flags.py
index 49355b436..8000eac4a 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -292,6 +292,7 @@ DEFINE_string('ajax_console_proxy_url',
                in the form "http://127.0.0.1:8000"')
 DEFINE_string('ajax_console_proxy_port', 8000,
               'port that ajax_console_proxy binds')
+DEFINE_string('vsa_topic', 'vsa', 'the topic that nova-vsa service listens on')
 DEFINE_bool('verbose', False, 'show debug output')
 DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit')
 DEFINE_bool('fake_network', False,
@@ -364,6 +365,32 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
               'Manager for volume')
 DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
               'Manager for scheduler')
+DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager',
+              'Manager for vsa')
+DEFINE_string('vc_image_name', 'vc_image',
+              'the VC image ID (for a VC image that exists in the Glance DB)')
+#---------------------------------------------------------------------
+# VSA constants and enums
+DEFINE_string('default_vsa_instance_type', 'm1.small',
+              'default instance type for VSA instances')
+DEFINE_integer('max_vcs_in_vsa', 32,
+               'maximum number of VCs in a VSA')
+DEFINE_integer('vsa_part_size_gb', 100,
+               'default partition size for shared capacity')
+
+DEFINE_string('vsa_status_creating', 'creating',
+              'VSA creating (not ready yet)')
+DEFINE_string('vsa_status_launching', 'launching',
+              'Launching VCs (all BE volumes were created)')
+DEFINE_string('vsa_status_created', 'created',
+              'VSA fully created and ready for use')
+DEFINE_string('vsa_status_partial', 'partial',
+              'Some BE storage allocations failed')
+DEFINE_string('vsa_status_failed', 'failed',
+              'VSA creation failed')
+DEFINE_string('vsa_status_deleting', 'deleting',
+              'VSA started the deletion procedure')
+

 # The service to use for image search and retrieval
 DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
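The status flags above encode the VSA lifecycle: a VSA is born 'creating', moves to 'launching' once all of its BE volumes exist, and settles in 'created'; 'partial' and 'failed' record allocation failures, and 'deleting' covers teardown. A minimal sketch of that lifecycle follows; the transition map itself is an assumption read out of the help strings, not something this patch defines:

    # Hypothetical VSA status lifecycle, inferred from the flag help strings.
    VSA_TRANSITIONS = {
        'creating': ('launching', 'partial', 'failed', 'deleting'),
        'launching': ('created', 'failed', 'deleting'),
        'created': ('deleting',),
        'partial': ('deleting',),
        'failed': ('deleting',),
        'deleting': (),
    }

    def may_transition(current, new):
        """Return True if a VSA may move from status `current` to `new`."""
        return new in VSA_TRANSITIONS.get(current, ())

    assert may_transition('creating', 'launching')
    assert not may_transition('created', 'launching')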
diff --git a/nova/quota.py b/nova/quota.py
index 58766e846..46322d60c 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -24,13 +24,13 @@ from nova import flags

 FLAGS = flags.FLAGS
-flags.DEFINE_integer('quota_instances', 10,
+flags.DEFINE_integer('quota_instances', 100,  # 10
                      'number of instances allowed per project')
 flags.DEFINE_integer('quota_cores', 20,
                      'number of instance cores allowed per project')
 flags.DEFINE_integer('quota_ram', 50 * 1024,
                      'megabytes of instance ram allowed per project')
-flags.DEFINE_integer('quota_volumes', 10,
+flags.DEFINE_integer('quota_volumes', 100,  # 10
                      'number of volumes allowed per project')
 flags.DEFINE_integer('quota_gigabytes', 1000,
                      'number of volume gigabytes allowed per project')
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
new file mode 100644
index 000000000..4277c0ba8
--- /dev/null
+++ b/nova/scheduler/vsa.py
@@ -0,0 +1,495 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VSA Simple Scheduler
+"""
+
+from nova import context
+from nova import rpc
+from nova import db
+from nova import flags
+from nova import utils
+from nova.volume import api as volume_api
+from nova.scheduler import driver
+from nova.scheduler import simple
+from nova import log as logging
+
+LOG = logging.getLogger('nova.scheduler.vsa')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('gb_to_bytes_shift', 30,
+                     'Conversion shift between GB and bytes')
+flags.DEFINE_integer('drive_type_approx_capacity_percent', 10,
+                     'The percentage range for capacity comparison')
+flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10,
+                     'The number of unique hosts per storage allocation')
+flags.DEFINE_boolean('vsa_select_unique_drives', True,
+                     'If True, place drives of one allocation on unique '
+                     'hosts; if False, allow reuse of the same host')
+
+
+class VsaScheduler(simple.SimpleScheduler):
+    """Simple scheduler that places VSA volumes on the least loaded host."""
+
+    def __init__(self, *args, **kwargs):
+        super(VsaScheduler, self).__init__(*args, **kwargs)
+        self._notify_all_volume_hosts("startup")
+
+    def _notify_all_volume_hosts(self, event):
+        rpc.cast(context.get_admin_context(),
+                 FLAGS.volume_topic,
+                 {"method": "notification",
+                  "args": {"event": event}})
+
+    def _compare_names(self, str1, str2):
+        result = str1.lower() == str2.lower()
+        # LOG.debug(_("Comparing %(str1)s and %(str2)s. "\
+        #             "Result %(result)s"), locals())
+        return result
+
+    def _compare_sizes_exact_match(self, cap_capacity, size_gb):
+        cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift
+        size_gb = int(size_gb)
+        result = cap_capacity == size_gb
+        # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
+        #             "Result %(result)s"), locals())
+        return result
+
+    def _compare_sizes_approxim(self, cap_capacity, size_gb):
+        cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift
+        size_gb = int(size_gb)
+        size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100
+
+        result = cap_capacity >= size_gb - size_perc and \
+                 cap_capacity <= size_gb + size_perc
+        # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
+        #             "Result %(result)s"), locals())
+        return result
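To make the approximate comparison concrete: capability reports carry capacity in bytes, the scheduler shifts right by gb_to_bytes_shift (30) to get GB, and a drive type matches when the reported capacity lands within drive_type_approx_capacity_percent (10%) of the requested size. A standalone sketch with the default flag values:

    GB_SHIFT = 30        # mirrors FLAGS.gb_to_bytes_shift
    APPROX_PERCENT = 10  # mirrors FLAGS.drive_type_approx_capacity_percent

    def sizes_match_approx(cap_capacity_bytes, size_gb):
        """Match a reported capacity (bytes) against a size (GB) +/- 10%."""
        cap_gb = int(cap_capacity_bytes) >> GB_SHIFT
        margin = size_gb * APPROX_PERCENT / 100
        return size_gb - margin <= cap_gb <= size_gb + margin

    # A 931 GB raw drive still matches a nominal 1000 GB drive type
    # (the margin is 100 GB), while a 750 GB drive does not.
    assert sizes_match_approx(931 << 30, 1000)
    assert not sizes_match_approx(750 << 30, 1000)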
"\ + # "Result %(result)s"), locals()) + return result + + def _qosgrp_match(self, drive_type, qos_values): + + # Add more entries for additional comparisons + compare_list = [{'cap1': 'DriveType', + 'cap2': 'type', + 'cmp_func': self._compare_names}, + {'cap1': 'DriveCapacity', + 'cap2': 'size_gb', + 'cmp_func': self._compare_sizes_approxim}] + + for cap in compare_list: + if cap['cap1'] in qos_values.keys() and \ + cap['cap2'] in drive_type.keys() and \ + cap['cmp_func'] is not None and \ + cap['cmp_func'](qos_values[cap['cap1']], + drive_type[cap['cap2']]): + # LOG.debug(_("One of required capabilities found: %s:%s"), + # cap['cap1'], drive_type[cap['cap2']]) + pass + else: + return False + return True + + def _filter_hosts(self, topic, request_spec, host_list=None): + + drive_type = request_spec['drive_type'] + LOG.debug(_("Filter hosts for drive type %(drive_type)s") % locals()) + + if host_list is None: + host_list = self.zone_manager.service_states.iteritems() + + filtered_hosts = [] # returns list of (hostname, capability_dict) + for host, host_dict in host_list: + for service_name, service_dict in host_dict.iteritems(): + if service_name != topic: + continue + + gos_info = service_dict.get('drive_qos_info', {}) + for qosgrp, qos_values in gos_info.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + if qos_values['AvailableCapacity'] > 0: + LOG.debug(_("Adding host %s to the list"), host) + filtered_hosts.append((host, gos_info)) + else: + LOG.debug(_("Host %s has no free capacity. Skip"), + host) + break + + LOG.debug(_("Found hosts %(filtered_hosts)s") % locals()) + return filtered_hosts + + def _allowed_to_use_host(self, host, selected_hosts, unique): + if unique == False or \ + host not in [item[0] for item in selected_hosts]: + return True + else: + return False + + def _add_hostcap_to_list(self, selected_hosts, host, cap): + if host not in [item[0] for item in selected_hosts]: + selected_hosts.append((host, cap)) + + def _alg_least_used_host(self, request_spec, all_hosts, selected_hosts): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + min_used = 0 + + LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ + "%(drive_type)s from %(all_hosts)s"), locals()) + + for (host, capabilities) in all_hosts: + has_enough_capacity = False + used_capacity = 0 + for qosgrp, qos_values in capabilities.iteritems(): + + used_capacity = used_capacity + qos_values['TotalCapacity'] \ + - qos_values['AvailableCapacity'] + + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + if qos_values['FullDrive']['NumFreeDrives'] > 0: + has_enough_capacity = True + matched_qos = qos_values + else: + break + else: + if qos_values['AvailableCapacity'] >= size and \ + (qos_values['PartitionDrive'][ + 'NumFreePartitions'] > 0 or \ + qos_values['FullDrive']['NumFreeDrives'] > 0): + has_enough_capacity = True + matched_qos = qos_values + else: + break + + if has_enough_capacity and \ + self._allowed_to_use_host(host, + selected_hosts, + unique) and \ + (best_host is None or used_capacity < min_used): + + min_used = used_capacity + best_host = host + best_qoscap = matched_qos + best_cap = capabilities + + if best_host: + self._add_hostcap_to_list(selected_hosts, host, best_cap) + LOG.debug(_("Best host found: %(best_host)s. 
"\ + "(used capacity %(min_used)s)"), locals()) + return (best_host, best_qoscap) + + def _alg_most_avail_capacity(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + max_avail = 0 + + LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ + "%(drive_type)s from %(all_hosts)s"), locals()) + + for (host, capabilities) in all_hosts: + for qosgrp, qos_values in capabilities.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + available = qos_values['FullDrive']['NumFreeDrives'] + else: + available = qos_values['AvailableCapacity'] + + if available > max_avail and \ + self._allowed_to_use_host(host, + selected_hosts, + unique): + max_avail = available + best_host = host + best_qoscap = qos_values + best_cap = capabilities + break # go to the next host + + if best_host: + self._add_hostcap_to_list(selected_hosts, host, best_cap) + LOG.debug(_("Best host found: %(best_host)s. "\ + "(available capacity %(max_avail)s)"), locals()) + + return (best_host, best_qoscap) + + def _select_hosts(self, request_spec, all_hosts, selected_hosts=None): + + #self._alg_most_avail_capacity(request_spec, all_hosts, selected_hosts) + + if selected_hosts is None: + selected_hosts = [] + + host = None + if len(selected_hosts) >= FLAGS.vsa_unique_hosts_per_alloc: + # try to select from already selected hosts only + LOG.debug(_("Maximum number of hosts selected (%d)"), + len(selected_hosts)) + unique = False + (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + selected_hosts, + selected_hosts, + unique) + + LOG.debug(_("Selected excessive host %(host)s"), locals()) + else: + unique = FLAGS.vsa_select_unique_drives + + if host is None: + # if we've not tried yet (# of sel hosts < max) - unique=True + # or failed to select from selected_hosts - unique=False + # select from all hosts + (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + all_hosts, + selected_hosts, + unique) + LOG.debug(_("Selected host %(host)s"), locals()) + + if host is None: + raise driver.WillNotSchedule(_("No available hosts")) + + return (host, qos_cap) + + def _provision_volume(self, context, vol, vsa_id, availability_zone): + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + now = utils.utcnow() + options = { + 'size': vol['size'], + 'user_id': context.user_id, + 'project_id': context.project_id, + 'snapshot_id': None, + 'availability_zone': availability_zone, + 'status': "creating", + 'attach_status': "detached", + 'display_name': vol['name'], + 'display_description': vol['description'], + 'to_vsa_id': vsa_id, + 'drive_type_id': vol['drive_ref']['id'], + 'host': vol['host'], + 'scheduled_at': now + } + + size = vol['size'] + host = vol['host'] + name = vol['name'] + LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\ + "host %(host)s"), locals()) + + volume_ref = db.volume_create(context, options) + rpc.cast(context, + db.queue_get_for(context, "volume", vol['host']), + {"method": "create_volume", + "args": {"volume_id": volume_ref['id'], + "snapshot_id": None}}) + + def _check_host_enforcement(self, availability_zone): + if (availability_zone + and ':' in availability_zone + and context.is_admin): + zone, _x, host = availability_zone.partition(':') + service = db.service_get_by_args(context.elevated(), host, + 'nova-volume') + if not 
+
+    def _provision_volume(self, context, vol, vsa_id, availability_zone):
+
+        if availability_zone is None:
+            availability_zone = FLAGS.storage_availability_zone
+
+        now = utils.utcnow()
+        options = {
+            'size': vol['size'],
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'snapshot_id': None,
+            'availability_zone': availability_zone,
+            'status': "creating",
+            'attach_status': "detached",
+            'display_name': vol['name'],
+            'display_description': vol['description'],
+            'to_vsa_id': vsa_id,
+            'drive_type_id': vol['drive_ref']['id'],
+            'host': vol['host'],
+            'scheduled_at': now
+            }
+
+        size = vol['size']
+        host = vol['host']
+        name = vol['name']
+        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
+                    "host %(host)s"), locals())
+
+        volume_ref = db.volume_create(context, options)
+        rpc.cast(context,
+                 db.queue_get_for(context, "volume", vol['host']),
+                 {"method": "create_volume",
+                  "args": {"volume_id": volume_ref['id'],
+                           "snapshot_id": None}})
+
+    def _check_host_enforcement(self, context, availability_zone):
+        if (availability_zone
+            and ':' in availability_zone
+            and context.is_admin):
+            zone, _x, host = availability_zone.partition(':')
+            service = db.service_get_by_args(context.elevated(), host,
+                                             'nova-volume')
+            if not self.service_is_up(service):
+                raise driver.WillNotSchedule(_("Host %s not available") % host)
+
+            return host
+        else:
+            return None
+
+    def _assign_hosts_to_volumes(self, context, volume_params, forced_host):
+
+        prev_drive_type_id = None
+        selected_hosts = []
+
+        LOG.debug(_("volume_params %(volume_params)s") % locals())
+
+        for vol in volume_params:
+            LOG.debug(_("Assigning host to volume %s") % vol['name'])
+
+            if forced_host:
+                vol['host'] = forced_host
+                vol['capabilities'] = None
+                continue
+
+            drive_type = vol['drive_ref']
+            request_spec = {'size': vol['size'],
+                            'drive_type': dict(drive_type)}
+
+            if prev_drive_type_id != drive_type['id']:
+                # generate list of hosts for this drive type
+                all_hosts = self._filter_hosts("volume", request_spec)
+                prev_drive_type_id = drive_type['id']
+
+            (host, qos_cap) = self._select_hosts(request_spec,
+                                                 all_hosts, selected_hosts)
+            vol['host'] = host
+            vol['capabilities'] = qos_cap
+            self._consume_resource(qos_cap, vol['size'], -1)
+
+            LOG.debug(_("Assigned host %(host)s, capabilities %(qos_cap)s"),
+                      locals())
+
+        LOG.debug(_("END: volume_params %(volume_params)s") % locals())
+
+    def schedule_create_volumes(self, context, request_spec,
+                                availability_zone, *_args, **_kwargs):
+        """Picks hosts for hosting multiple volumes."""
+
+        num_volumes = request_spec.get('num_volumes')
+        LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
+                  locals())
+
+        LOG.debug(_("Service states BEFORE %s"),
+                  self.zone_manager.service_states)
+
+        vsa_id = request_spec.get('vsa_id')
+        volume_params = request_spec.get('volumes')
+
+        host = self._check_host_enforcement(context, availability_zone)
+
+        try:
+            self._assign_hosts_to_volumes(context, volume_params, host)
+
+            for vol in volume_params:
+                self._provision_volume(context, vol, vsa_id,
+                                       availability_zone)
+
+            LOG.debug(_("Service states AFTER %s"),
+                      self.zone_manager.service_states)
+
+        except Exception:
+            if vsa_id:
+                db.vsa_update(context, vsa_id,
+                              dict(status=FLAGS.vsa_status_failed))
+
+            for vol in volume_params:
+                if 'capabilities' in vol:
+                    self._consume_resource(vol['capabilities'],
+                                           vol['size'], 1)
+            LOG.debug(_("Service states AFTER %s"),
+                      self.zone_manager.service_states)
+            raise
+
+        return None
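The request_spec consumed here is produced by nova.vsa.api (added later in this patch): one entry per BE volume, each carrying the drive-type record it was derived from. Roughly, with invented values:

    # Shape of the request_spec handed to schedule_create_volumes();
    # the values are invented, the keys come from nova/vsa/api.py below.
    request_spec = {
        'num_volumes': 2,
        'vsa_id': 123,
        'volumes': [
            {'name': 'vsa-123_SAS_1000_vol-0',
             'size': 0,               # size 0 requests a full drive
             'snapshot_id': None,
             'description': 'BE volume for vsa-123_SAS_1000_vol-0',
             'drive_ref': {'id': 1, 'type': 'SAS', 'size_gb': 1000}},
            {'name': 'vsa-123_SAS_1000_vol-1',
             'size': 100,             # GB: one partition of a shared drive
             'snapshot_id': None,
             'description': 'BE volume for vsa-123_SAS_1000_vol-1',
             'drive_ref': {'id': 1, 'type': 'SAS', 'size_gb': 1000}},
        ],
    }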
+
+    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
+        """Picks the best host based on requested drive type capability."""
+        volume_ref = db.volume_get(context, volume_id)
+
+        host = self._check_host_enforcement(context,
+                                            volume_ref['availability_zone'])
+        if host:
+            now = utils.utcnow()
+            db.volume_update(context, volume_id, {'host': host,
+                                                  'scheduled_at': now})
+            return host
+
+        drive_type = volume_ref['drive_type']
+        if drive_type is None:
+            LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
+            return super(VsaScheduler, self).schedule_create_volume(context,
+                        volume_id, *_args, **_kwargs)
+        drive_type = dict(drive_type)
+
+        # otherwise - drive type is loaded
+        LOG.debug(_("Spawning volume %d with drive type %s"),
+                  volume_ref['id'], drive_type)
+
+        LOG.debug(_("Service states BEFORE %s"),
+                  self.zone_manager.service_states)
+
+        request_spec = {'size': volume_ref['size'],
+                        'drive_type': drive_type}
+        hosts = self._filter_hosts("volume", request_spec)
+
+        try:
+            (host, qos_cap) = self._select_hosts(request_spec,
+                                                 all_hosts=hosts)
+        except Exception:
+            if volume_ref['to_vsa_id']:
+                db.vsa_update(context, volume_ref['to_vsa_id'],
+                              dict(status=FLAGS.vsa_status_failed))
+            raise
+            #return super(VsaScheduler, self).schedule_create_volume(context,
+            #        volume_id, *_args, **_kwargs)
+
+        if host:
+            now = utils.utcnow()
+            db.volume_update(context, volume_id, {'host': host,
+                                                  'scheduled_at': now})
+            self._consume_resource(qos_cap, volume_ref['size'], -1)
+
+        LOG.debug(_("Service states AFTER %s"),
+                  self.zone_manager.service_states)
+        return host
+
+    def _consume_full_drive(self, qos_values, direction):
+        qos_values['FullDrive']['NumFreeDrives'] += direction
+        qos_values['FullDrive']['NumOccupiedDrives'] -= direction
+
+    def _consume_partition(self, qos_values, size, direction):
+
+        if qos_values['PartitionDrive']['PartitionSize'] != 0:
+            partition_size = qos_values['PartitionDrive']['PartitionSize']
+        else:
+            partition_size = size
+        part_per_drive = qos_values['DriveCapacity'] / partition_size
+
+        if direction == -1 and \
+           qos_values['PartitionDrive']['NumFreePartitions'] == 0:
+
+            self._consume_full_drive(qos_values, direction)
+            qos_values['PartitionDrive']['NumFreePartitions'] += \
+                part_per_drive
+
+        qos_values['PartitionDrive']['NumFreePartitions'] += direction
+        qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction
+
+        if direction == 1 and \
+           qos_values['PartitionDrive']['NumFreePartitions'] >= \
+           part_per_drive:
+
+            self._consume_full_drive(qos_values, direction)
+            qos_values['PartitionDrive']['NumFreePartitions'] -= \
+                part_per_drive
+
+    def _consume_resource(self, qos_values, size, direction):
+        if qos_values is None:
+            LOG.debug(_("No capability selected for volume of size %(size)s"),
+                      locals())
+            return
+
+        if size == 0:   # full drive match
+            qos_values['AvailableCapacity'] += direction * \
+                qos_values['DriveCapacity']
+            self._consume_full_drive(qos_values, direction)
+        else:
+            qos_values['AvailableCapacity'] += direction * \
+                (size << FLAGS.gb_to_bytes_shift)
+            self._consume_partition(qos_values,
+                                    size << FLAGS.gb_to_bytes_shift,
+                                    direction)
+        return
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index f99e1713d..36e469be3 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -242,7 +242,7 @@ class LibvirtConnTestCase(test.TestCase):
             return """
 
-
+
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 7d27abff9..f81222017 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -41,7 +41,9 @@ LOG = logging.getLogger('nova.volume')

 class API(base.Base):
     """API for interacting with the volume manager."""

-    def create(self, context, size, snapshot_id, name, description):
+    def create(self, context, size, snapshot_id, name, description,
+               to_vsa_id=None, from_vsa_id=None, drive_type_id=None,
+               availability_zone=None):
         if snapshot_id != None:
             snapshot = self.get_snapshot(context, snapshot_id)
             if snapshot['status'] != "available":
@@ -50,25 +52,36 @@ class API(base.Base):
             if not size:
                 size = snapshot['volume_size']

-        if quota.allowed_volumes(context, 1, size) < 1:
-            pid = context.project_id
-            LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
-                    " %(size)sG volume") % locals())
-            raise quota.QuotaError(_("Volume quota exceeded. You cannot "
-                                     "create a volume of size %sG") % size)
+        if availability_zone is None:
+            availability_zone = FLAGS.storage_availability_zone
+
+        if to_vsa_id is None:
+            # VP-TODO: for now don't check quotas for BE volumes
+            if quota.allowed_volumes(context, 1, size) < 1:
+                pid = context.project_id
+                LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
+                        " %(size)sG volume") % locals())
+                raise quota.QuotaError(_("Volume quota exceeded. 
You cannot " + "create a volume of size %sG") % size) options = { 'size': size, 'user_id': context.user_id, 'project_id': context.project_id, 'snapshot_id': snapshot_id, - 'availability_zone': FLAGS.storage_availability_zone, + 'availability_zone': availability_zone, 'status': "creating", 'attach_status': "detached", 'display_name': name, - 'display_description': description} + 'display_description': description, + 'to_vsa_id': to_vsa_id, + 'from_vsa_id': from_vsa_id, + 'drive_type_id': drive_type_id} volume = self.db.volume_create(context, options) + if from_vsa_id is not None: # for FE VSA volumes do nothing + return volume + rpc.cast(context, FLAGS.scheduler_topic, {"method": "create_volume", @@ -89,6 +102,12 @@ class API(base.Base): volume = self.get(context, volume_id) if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) + + if volume['from_vsa_id'] is not None: + self.db.volume_destroy(context, volume['id']) + LOG.debug(_("volume %d: deleted successfully"), volume['id']) + return + now = utils.utcnow() self.db.volume_update(context, volume_id, {'status': 'deleting', 'terminated_at': now}) @@ -110,6 +129,15 @@ class API(base.Base): return self.db.volume_get_all(context) return self.db.volume_get_all_by_project(context, context.project_id) + def get_all_by_vsa(self, context, vsa_id, direction): + if direction == "to": + return self.db.volume_get_all_assigned_to_vsa(context, vsa_id) + elif direction == "from": + return self.db.volume_get_all_assigned_from_vsa(context, vsa_id) + else: + raise exception.ApiError(_("Unsupported vol assignment type %s"), + direction) + def get_snapshot(self, context, snapshot_id): rv = self.db.snapshot_get(context, snapshot_id) return dict(rv.iteritems()) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 23e845deb..ec09325d8 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -501,7 +501,15 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) if not iscsi_properties['target_discovered']: - self._run_iscsiadm(iscsi_properties, ('--op', 'new')) + # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept + # multiple args for iscsi-command. Like in --op new. Hence + # using a local version here which does the same thing + (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'new', + '-m', 'node', + '-T', iscsi_properties['target_iqn'], + '-p', iscsi_properties['target_portal']) + # self._run_iscsiadm(iscsi_properties, ('--op', 'new')) + # zadara-end if iscsi_properties.get('auth_method'): self._iscsiadm_update(iscsi_properties, @@ -553,7 +561,15 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") self._run_iscsiadm(iscsi_properties, "--logout") - self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) + # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept + # multiple args for iscsi-command. Like in --op delete. 
Hence + # using a local version here which does the same thing + (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'delete', + '-m', 'node', + '-T', iscsi_properties['target_iqn'], + '-p', iscsi_properties['target_portal']) + #self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) + # zadara-end def check_for_export(self, context, volume_id): """Make sure volume is exported.""" diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 798bd379a..3e2892fee 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,6 +42,7 @@ intact. """ +import time from nova import context from nova import exception @@ -49,6 +50,7 @@ from nova import flags from nova import log as logging from nova import manager from nova import utils +from nova import rpc LOG = logging.getLogger('nova.volume.manager') @@ -58,22 +60,40 @@ flags.DEFINE_string('storage_availability_zone', 'availability zone of this service') flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver', 'Driver to use for volume creation') +flags.DEFINE_string('vsa_volume_driver', 'nova.volume.san.ZadaraVsaDriver', + 'Driver to use for FE/BE volume creation with VSA') flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') +flags.DEFINE_integer('volume_state_interval', 60, + 'Interval in seconds for querying volumes status') class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices.""" - def __init__(self, volume_driver=None, *args, **kwargs): + def __init__(self, volume_driver=None, vsa_volume_driver=None, + *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" if not volume_driver: volume_driver = FLAGS.volume_driver self.driver = utils.import_object(volume_driver) + if not vsa_volume_driver: + vsa_volume_driver = FLAGS.vsa_volume_driver + self.vsadriver = utils.import_object(vsa_volume_driver) super(VolumeManager, self).__init__(service_name='volume', *args, **kwargs) # NOTE(vish): Implementation specific db handling is done # by the driver. self.driver.db = self.db + self.vsadriver.db = self.db + self._last_volume_stats = [] + #self._last_host_check = 0 + + def _get_driver(self, volume_ref): + if volume_ref['to_vsa_id'] is None and \ + volume_ref['from_vsa_id'] is None: + return self.driver + else: + return self.vsadriver def init_host(self): """Do any initialization that needs to be run if this is a @@ -84,10 +104,15 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: if volume['status'] in ['available', 'in-use']: - self.driver.ensure_export(ctxt, volume) + driver = self._get_driver(volume) + driver.ensure_export(ctxt, volume) else: LOG.info(_("volume %s: skipping export"), volume['name']) + def create_volumes(self, context, request_spec, availability_zone): + LOG.info(_("create_volumes called with req=%(request_spec)s, "\ + "availability_zone=%(availability_zone)s"), locals()) + def create_volume(self, context, volume_id, snapshot_id=None): """Creates and exports the volume.""" context = context.elevated() @@ -101,28 +126,31 @@ class VolumeManager(manager.SchedulerDependentManager): # before passing it to the driver. 
volume_ref['host'] = self.host + driver = self._get_driver(volume_ref) try: vol_name = volume_ref['name'] vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) if snapshot_id == None: - model_update = self.driver.create_volume(volume_ref) + model_update = driver.create_volume(volume_ref) else: snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = self.driver.create_volume_from_snapshot( + model_update = driver.create_volume_from_snapshot( volume_ref, snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - model_update = self.driver.create_export(context, volume_ref) + model_update = driver.create_export(context, volume_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) - except Exception: + # except Exception: + except: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) + self._notify_vsa(context, volume_ref, 'error') raise now = utils.utcnow() @@ -130,8 +158,20 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref['id'], {'status': 'available', 'launched_at': now}) LOG.debug(_("volume %s: created successfully"), volume_ref['name']) + + self._notify_vsa(context, volume_ref, 'available') + return volume_id + def _notify_vsa(self, context, volume_ref, status): + if volume_ref['to_vsa_id'] is not None: + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "vsa_volume_created", + "args": {"vol_id": volume_ref['id'], + "vsa_id": volume_ref['to_vsa_id'], + "status": status}}) + def delete_volume(self, context, volume_id): """Deletes and unexports volume.""" context = context.elevated() @@ -141,14 +181,15 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) + driver = self._get_driver(volume_ref) try: LOG.debug(_("volume %s: removing export"), volume_ref['name']) - self.driver.remove_export(context, volume_ref) + driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) - self.driver.delete_volume(volume_ref) + driver.delete_volume(volume_ref) except exception.VolumeIsBusy, e: LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) - self.driver.ensure_export(context, volume_ref) + driver.ensure_export(context, volume_ref) self.db.volume_update(context, volume_ref['id'], {'status': 'available'}) return True @@ -171,6 +212,7 @@ class VolumeManager(manager.SchedulerDependentManager): try: snap_name = snapshot_ref['name'] LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) + # snapshot-related operations are irrelevant for vsadriver model_update = self.driver.create_snapshot(snapshot_ref) if model_update: self.db.snapshot_update(context, snapshot_ref['id'], @@ -194,6 +236,7 @@ class VolumeManager(manager.SchedulerDependentManager): try: LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) + # snapshot-related operations are irrelevant for vsadriver self.driver.delete_snapshot(snapshot_ref) except Exception: self.db.snapshot_update(context, @@ -211,23 +254,75 @@ class VolumeManager(manager.SchedulerDependentManager): Returns path to device.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) + driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: - path = self.driver.local_path(volume_ref) + path = 
driver.local_path(volume_ref) else: - path = self.driver.discover_volume(context, volume_ref) + path = driver.discover_volume(context, volume_ref) return path def remove_compute_volume(self, context, volume_id): """Remove remote volume on compute host.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) + driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: return True else: - self.driver.undiscover_volume(volume_ref) + driver.undiscover_volume(volume_ref) def check_for_export(self, context, instance_id): """Make sure whether volume is exported.""" instance_ref = self.db.instance_get(context, instance_id) for volume in instance_ref['volumes']: - self.driver.check_for_export(context, volume['id']) + driver = self._get_driver(volume) + driver.check_for_export(context, volume['id']) + + def periodic_tasks(self, context=None): + """Tasks to be run at a periodic interval.""" + + error_list = [] + try: + self._report_driver_status() + except Exception as ex: + LOG.warning(_("Error during report_driver_status(): %s"), + unicode(ex)) + error_list.append(ex) + + super(VolumeManager, self).periodic_tasks(context) + + return error_list + + def _volume_stats_changed(self, stat1, stat2): + #LOG.info(_("stat1=%s"), stat1) + #LOG.info(_("stat2=%s"), stat2) + + if len(stat1) != len(stat2): + return True + for (k, v) in stat1.iteritems(): + if (k, v) not in stat2.iteritems(): + return True + return False + + def _report_driver_status(self): + #curr_time = time.time() + #LOG.info(_("Report Volume node status")) + #if curr_time - self._last_host_check > FLAGS.volume_state_interval: + # self._last_host_check = curr_time + + LOG.info(_("Updating volume status")) + + volume_stats = self.vsadriver.get_volume_stats(refresh=True) + if self._volume_stats_changed(self._last_volume_stats, volume_stats): + LOG.info(_("New capabilities found: %s"), volume_stats) + self._last_volume_stats = volume_stats + + # This will grab info about the host and queue it + # to be sent to the Schedulers. 
+ self.update_service_capabilities(self._last_volume_stats) + else: + self.update_service_capabilities(None) + + def notification(self, context, event): + LOG.info(_("Notification {%s} received"), event) + self._last_volume_stats = [] diff --git a/nova/volume/san.py b/nova/volume/san.py index 9532c8116..6a962c6f2 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -26,6 +26,7 @@ import paramiko from xml.etree import ElementTree +from nova import context from nova import exception from nova import flags from nova import log as logging @@ -64,12 +65,16 @@ class SanISCSIDriver(ISCSIDriver): # discover_volume is still OK # undiscover_volume is still OK - def _connect_to_ssh(self): + def _connect_to_ssh(self, san_ip=None): + if san_ip: + ssh_ip = san_ip + else: + ssh_ip = FLAGS.san_ip ssh = paramiko.SSHClient() #TODO(justinsb): We need a better SSH key policy ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if FLAGS.san_password: - ssh.connect(FLAGS.san_ip, + ssh.connect(ssh_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, password=FLAGS.san_password) @@ -77,7 +82,7 @@ class SanISCSIDriver(ISCSIDriver): privatekeyfile = os.path.expanduser(FLAGS.san_privatekey) # It sucks that paramiko doesn't support DSA keys privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) - ssh.connect(FLAGS.san_ip, + ssh.connect(ssh_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, pkey=privatekey) @@ -85,9 +90,9 @@ class SanISCSIDriver(ISCSIDriver): raise exception.Error(_("Specify san_password or san_privatekey")) return ssh - def _run_ssh(self, command, check_exit_code=True): + def _run_ssh(self, command, check_exit_code=True, san_ip=None): #TODO(justinsb): SSH connection caching (?) - ssh = self._connect_to_ssh() + ssh = self._connect_to_ssh(san_ip) #TODO(justinsb): Reintroduce the retry hack ret = ssh_execute(ssh, command, check_exit_code=check_exit_code) @@ -583,3 +588,311 @@ class HpSanISCSIDriver(SanISCSIDriver): cliq_args['volumeName'] = volume['name'] self._cliq_run_xml("unassignVolume", cliq_args) + + +class ZadaraVsaDriver(SanISCSIDriver): + """Executes commands relating to Virtual Storage Array volumes. + + There are two types of volumes. Front-end(FE) volumes and Back-end(BE) + volumes. + + FE volumes are nova-volumes that are exported by VSA instance & can be + consumed by user instances. We use SSH to connect into the VSA instance + to execute those steps. + + BE volumes are nova-volumes that are attached as back-end storage for the + VSA instance. + + VSA instance essentially consumes the BE volumes and allows creation of FE + volumes over it. + """ + + """ Volume Driver methods """ + def create_volume(self, volume): + """Creates FE/BE volume.""" + if volume['to_vsa_id']: + self._create_be_volume(volume) + else: + self._create_fe_volume(volume) + + def delete_volume(self, volume): + """Deletes FE/BE volume.""" + if volume['to_vsa_id']: + self._delete_be_volume(volume) + else: + self._delete_fe_volume(volume) + + def local_path(self, volume): + # TODO: Is this needed here? + raise exception.Error(_("local_path not supported")) + + def ensure_export(self, context, volume): + """On bootup synchronously ensures a volume export is available.""" + if volume['to_vsa_id']: + return self._ensure_be_export(context, volume) + + # Not required for FE volumes. 
VSA VM will ensure volume exposure
+        pass
+
+    def create_export(self, context, volume):
+        """For first time creates volume export."""
+        if volume['to_vsa_id']:
+            return self._create_be_export(context, volume)
+        else:
+            return self._create_fe_export(context, volume)
+
+    def remove_export(self, context, volume):
+        if volume['to_vsa_id']:
+            return self._remove_be_export(context, volume)
+        else:
+            return self._remove_fe_export(context, volume)
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met"""
+        # skip the flags.san_ip check & do the regular check
+
+        if not (FLAGS.san_password or FLAGS.san_privatekey):
+            raise exception.Error(_("Specify san_password or san_privatekey"))
+
+    """ Internal BE Volume methods """
+    def _create_be_volume(self, volume):
+        """Creates BE volume."""
+        if int(volume['size']) == 0:
+            sizestr = '0'   # indicates full-partition
+        else:
+            sizestr = '%s' % (int(volume['size']) << 30)   # size in bytes
+
+        # Set the qos-str to default type sas
+        # TODO - later for this piece we will get the direct qos-group name
+        # in create_volume and hence this lookup will not be needed
+        qosstr = 'SAS_1000'
+        drive_type = volume.get('drive_type')
+        if drive_type is not None:
+            # for now just use the qos-type string from the disktypes.
+            qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb'])
+
+        self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+                        'create_qospart',
+                        '--qos', qosstr,
+                        '--pname', volume['name'],
+                        '--psize', sizestr,
+                        check_exit_code=0)
+        LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])
+
+    def _delete_be_volume(self, volume):
+        try:
+            self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+                            'delete_partition',
+                            '--pname', volume['name'],
+                            check_exit_code=0)
+        except exception.ProcessExecutionError:
+            LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
+            return
+
+        LOG.debug(_("VSA BE delete_volume for %s succeeded"), volume['name'])
+
+    def _create_be_export(self, context, volume):
+        """create BE export for a volume"""
+        self._ensure_iscsi_targets(context, volume['host'])
+        iscsi_target = self.db.volume_allocate_iscsi_target(context,
+                                                            volume['id'],
+                                                            volume['host'])
+        return self._common_be_export(context, volume, iscsi_target)
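The qos string handed to zadara_sncfg is simply "<type>_<size_gb>", so a 1 TB SAS drive type becomes "SAS_1000". A sketch of how the command line above is assembled (the drive_type record and the volume name are samples; the binary path is the one used in this driver):

    drive_type = {'type': 'SAS', 'size_gb': 1000}   # sample DB record
    size_gb = 0                                     # 0 requests a full drive

    qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb'])
    sizestr = '0' if size_gb == 0 else '%s' % (size_gb << 30)   # bytes

    argv = ['sudo', '/var/lib/zadara/bin/zadara_sncfg', 'create_qospart',
            '--qos', qosstr,
            '--pname', 'vsa-123_SAS_1000_vol-0',    # invented volume name
            '--psize', sizestr]
    assert qosstr == 'SAS_1000'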
+
+    def _ensure_be_export(self, context, volume):
+        """ensure BE export for a volume"""
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping ensure_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
+        return self._common_be_export(context, volume, iscsi_target)
+
+    def _common_be_export(self, context, volume, iscsi_target):
+        """
+        Common logic that asks zadara_sncfg to setup iSCSI target/lun for
+        this volume
+        """
+        (out, err) = self._sync_exec('sudo',
+                                     '/var/lib/zadara/bin/zadara_sncfg',
+                                     'create_export',
+                                     '--pname', volume['name'],
+                                     '--tid', iscsi_target,
+                                     check_exit_code=0)
+
+        result_xml = ElementTree.fromstring(out)
+        response_node = result_xml.find("Sn")
+        if response_node is None:
+            msg = "Malformed response from zadara_sncfg"
+            raise exception.Error(msg)
+
+        sn_ip = response_node.findtext("SnIp")
+        sn_iqn = response_node.findtext("IqnName")
+        iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target)
+
+        model_update = {}
+        model_update['provider_location'] = ("%s %s" %
+                                             (iscsi_portal,
+                                              sn_iqn))
+        return model_update
+
+    def _remove_be_export(self, context, volume):
+        """Removes BE export for a volume."""
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping remove_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
+        try:
+            self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+                            'remove_export',
+                            '--pname', volume['name'],
+                            '--tid', iscsi_target,
+                            check_exit_code=0)
+        except exception.ProcessExecutionError:
+            LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
+            return
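The provider_location assembled above is a single string of the form "<ip>:3260,<target-id> <iqn>". generate_user_data() in nova/vsa/api.py (later in this patch) splits it back apart with exactly the inverse steps; a small round-trip sketch with an invented value:

    def parse_provider_location(loc):
        """Split 'ip:3260,tid iqn' the way generate_user_data() does."""
        (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ")
        (ip, iscsi_portal) = iscsi_target.split(":", 1)
        return ip, iscsi_portal, iscsi_iqn

    ip, portal, iqn = parse_provider_location(
        "10.0.0.5:3260,12 iqn.2011-07.com.zadara:vol-0001")
    assert (ip, portal, iqn) == \
        ("10.0.0.5", "3260,12", "iqn.2011-07.com.zadara:vol-0001")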
+
+    def _get_qosgroup_summary(self):
+        """Gets the list of qosgroups from the Zadara SN."""
+        (out, err) = self._sync_exec('sudo',
+                                     '/var/lib/zadara/bin/zadara_sncfg',
+                                     'get_qosgroups_xml',
+                                     check_exit_code=0)
+        qos_groups = {}
+        result_xml = ElementTree.fromstring(out)
+        for element in result_xml.findall('QosGroup'):
+            qos_group = {}
+            # get the name of the group.
+            # If we cannot find it, forget this element
+            group_name = element.findtext("Name")
+            if not group_name:
+                continue
+
+            # loop through all child nodes & fill up attributes of this group
+            for child in element.getchildren():
+                # two types of elements - property of qos-group & sub property
+                # classify them accordingly
+                if child.text:
+                    qos_group[child.tag] = int(child.text) \
+                        if child.text.isdigit() else child.text
+                else:
+                    subelement = {}
+                    for subchild in child.getchildren():
+                        subelement[subchild.tag] = int(subchild.text) \
+                            if subchild.text.isdigit() else subchild.text
+                    qos_group[child.tag] = subelement
+
+            # Now add this group to the master qos_groups
+            qos_groups[group_name] = qos_group
+
+        return qos_groups
+
+    """ Internal FE Volume methods """
+    def _vsa_run(self, volume, verb, vsa_args):
+        """
+        Runs a command over SSH to VSA instance and checks the return status
+        """
+        vsa_arg_strings = []
+
+        if vsa_args:
+            for k, v in vsa_args.items():
+                vsa_arg_strings.append(" --%s %s" % (k, v))
+
+        # Form the zadara_cfg script that will do the configuration at VSA VM
+        cmd = "/var/lib/zadara/bin/zadara_cfg.py " + verb + \
+              ''.join(vsa_arg_strings)
+
+        # get the list of IP's corresponding to VSA VM's
+        vsa_ips = self.db.vsa_get_vc_ips_list(context.get_admin_context(),
+                                              volume['from_vsa_id'])
+        if not vsa_ips:
+            raise exception.Error(_("Cannot Lookup VSA VM's IP"))
+
+        # pick the first element in the return's fixed_ip for SSH
+        vsa_ip = vsa_ips[0]['fixed']
+
+        (out, _err) = self._run_ssh(cmd, san_ip=vsa_ip)
+
+        # check the xml StatusCode to check for real status
+        result_xml = ElementTree.fromstring(out)
+
+        status = result_xml.findtext("StatusCode")
+        if status != '0':
+            statusmsg = result_xml.findtext("StatusMessage")
+            msg = (_('vsa_run failed to ' + verb + ' for ' + volume['name'] +
+                     '. Result=' + str(statusmsg)))
+            raise exception.Error(msg)
+
+        return out, _err
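For a concrete picture of what travels over SSH: with verb "create_volume" and args {'volname': 'vol1', 'volsize': '10G'}, the command becomes "/var/lib/zadara/bin/zadara_cfg.py create_volume --volname vol1 --volsize 10G" (argument order follows dict iteration order, so it is not deterministic). The string is rebuilt below exactly as _vsa_run() builds it; the sample arguments are invented:

    verb = "create_volume"
    vsa_args = {'volname': 'vol1', 'volsize': '10G'}   # sample arguments

    arg_strings = [" --%s %s" % (k, v) for k, v in vsa_args.items()]
    cmd = "/var/lib/zadara/bin/zadara_cfg.py " + verb + ''.join(arg_strings)
    print(cmd)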
+
+    def _create_fe_volume(self, volume):
+        """Creates FE volume."""
+        vsa_args = {}
+        vsa_args['volname'] = volume['name']
+        if int(volume['size']) == 0:
+            sizestr = '100M'
+        else:
+            sizestr = '%sG' % volume['size']
+        vsa_args['volsize'] = sizestr
+        (out, _err) = self._vsa_run(volume, "create_volume", vsa_args)
+
+        LOG.debug(_("VSA FE create_volume for %s succeeded"), volume['name'])
+
+    def _delete_fe_volume(self, volume):
+        """Deletes FE volume."""
+        vsa_args = {}
+        vsa_args['volname'] = volume['name']
+        (out, _err) = self._vsa_run(volume, "delete_volume", vsa_args)
+        LOG.debug(_("VSA FE delete_volume for %s succeeded"), volume['name'])
+        return
+
+    def _create_fe_export(self, context, volume):
+        """Create FE volume exposure at VSA VM"""
+        vsa_args = {}
+        vsa_args['volname'] = volume['name']
+        (out, _err) = self._vsa_run(volume, "create_export", vsa_args)
+
+        result_xml = ElementTree.fromstring(out)
+        response_node = result_xml.find("Vsa")
+        if response_node is None:
+            msg = "Malformed response to VSA command"
+            raise exception.Error(msg)
+
+        LOG.debug(_("VSA create_export for %s succeeded"), volume['name'])
+
+        vsa_ip = response_node.findtext("VsaIp")
+        vsa_iqn = response_node.findtext("IqnName")
+        vsa_interface = response_node.findtext("VsaInterface")
+        iscsi_portal = vsa_ip + ":3260," + vsa_interface
+
+        model_update = {}
+        model_update['provider_location'] = ("%s %s" %
+                                             (iscsi_portal,
+                                              vsa_iqn))
+
+        return model_update
+
+    def _remove_fe_export(self, context, volume):
+        """Remove FE volume exposure at VSA VM"""
+        vsa_args = {}
+        vsa_args['volname'] = volume['name']
+        (out, _err) = self._vsa_run(volume, "remove_export", vsa_args)
+        LOG.debug(_("VSA FE remove_export for %s succeeded"), volume['name'])
+        return
+
+    def get_volume_stats(self, refresh=False):
+        """Return the current state of the volume service. If 'refresh' is
+           True, run the update first."""
+
+        drive_info = self._get_qosgroup_summary()
+        return {'drive_qos_info': drive_info}
diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py
new file mode 100644
index 000000000..a94a6b7a4
--- /dev/null
+++ b/nova/vsa/__init__.py
@@ -0,0 +1,18 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.vsa.api import API
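The package entry point above re-exports the API class from nova/vsa/api.py (next), so callers reach it as nova.vsa.API. A hypothetical caller, using the storage-list format accepted by create() below (the drive type name and counts are invented and must exist as drive_types records in a running deployment):

    from nova import context
    from nova import vsa

    ctxt = context.get_admin_context()
    vsa_ref = vsa.API().create(ctxt,
                               display_name='vsa-demo',
                               vc_count=2,
                               storage=[{'drive_name': 'SAS_1000',
                                         'num_drives': 3}],
                               shared=False)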
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
new file mode 100644
index 000000000..ed83ff563
--- /dev/null
+++ b/nova/vsa/api.py
@@ -0,0 +1,407 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to Virtual Storage Arrays (VSAs).
+"""
+
+import sys
+import base64
+
+from xml.etree import ElementTree
+from xml.etree.ElementTree import Element, SubElement
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import quota
+from nova import rpc
+from nova.db import base
+
+from nova import compute
+from nova import volume
+from nova.compute import instance_types
+from nova.vsa import drive_types
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_boolean('vsa_multi_vol_creation', True,
+                     'Ask scheduler to create multiple volumes in one call')
+
+LOG = logging.getLogger('nova.vsa')
+
+
+class API(base.Base):
+    """API for interacting with the VSA manager."""
+
+    def __init__(self, compute_api=None, volume_api=None, **kwargs):
+        self.compute_api = compute_api or compute.API()
+        self.volume_api = volume_api or volume.API()
+        super(API, self).__init__(**kwargs)
+
+    def _get_default_vsa_instance_type(self):
+        return instance_types.get_instance_type_by_name(
+            FLAGS.default_vsa_instance_type)
+
+    def _check_storage_parameters(self, context, vsa_name, storage, shared):
+        """
+        Translates a storage array of drives to the list of volumes
+        :param storage: List of dictionaries with the following keys:
+                        drive_name, num_drives, size
+        :param shared: Specifies if storage is dedicated or shared.
+                       For shared storage, drives are split into partitions
+        """
+        volume_params = []
+        for node in storage:
+
+            name = node.get('drive_name', None)
+            num_disks = node.get('num_drives', 1)
+
+            if name is None:
+                raise exception.ApiError(_("No drive_name param found in %s"),
+                                         node)
+
+            # find DB record for this disk
+            try:
+                drive_ref = drive_types.drive_type_get_by_name(context, name)
+            except exception.NotFound:
+                raise exception.ApiError(_("Invalid drive type name %s"),
+                                         name)
+
+            # if size field present - override disk size specified in DB
+            size = node.get('size', drive_ref['size_gb'])
+
+            if shared:
+                part_size = FLAGS.vsa_part_size_gb
+                total_capacity = num_disks * size
+                num_volumes = total_capacity / part_size
+                size = part_size
+            else:
+                num_volumes = num_disks
+                size = 0    # special handling for full drives
+
+            for i in range(num_volumes):
+                # VP-TODO: potentially may conflict with previous volumes
+                volume_name = vsa_name + ("_%s_vol-%d" % (name, i))
+                volume = {
+                    'size': size,
+                    'snapshot_id': None,
+                    'name': volume_name,
+                    'description': 'BE volume for ' + volume_name,
+                    'drive_ref': drive_ref
+                    }
+                volume_params.append(volume)
+
+        return volume_params
+
+    def create(self, context, display_name='', display_description='',
+               vc_count=1, instance_type=None, image_name=None,
+               availability_zone=None, storage=[], shared=None):
+        """
+        Provision VSA instance with corresponding compute instances
+        and associated volumes
+        :param storage: List of dictionaries with the following keys:
+                        drive_name, num_drives, size
+        :param shared: Specifies if storage is dedicated or shared. 
+ For shared storage disks split into partitions + """ + + if vc_count > FLAGS.max_vcs_in_vsa: + LOG.warning(_("Requested number of VCs (%d) is too high."\ + " Setting to default"), vc_count) + vc_count = FLAGS.max_vcs_in_vsa + + if instance_type is None: + instance_type = self._get_default_vsa_instance_type() + + if availability_zone is None: + availability_zone = FLAGS.storage_availability_zone + + if storage is None: + storage = [] + + if shared is None or shared == 'False' or shared == False: + shared = False + else: + shared = True + + # check if image is ready before starting any work + if image_name is None or image_name == '': + image_name = FLAGS.vc_image_name + try: + image_service = self.compute_api.image_service + vc_image = image_service.show_by_name(context, image_name) + vc_image_href = vc_image['id'] + except exception.ImageNotFound: + raise exception.ApiError(_("Failed to find configured image %s"), + image_name) + + options = { + 'display_name': display_name, + 'display_description': display_description, + 'project_id': context.project_id, + 'availability_zone': availability_zone, + 'instance_type_id': instance_type['id'], + 'image_ref': vc_image_href, + 'vc_count': vc_count, + 'status': FLAGS.vsa_status_creating, + } + LOG.info(_("Creating VSA: %s") % options) + + # create DB entry for VSA instance + try: + vsa_ref = self.db.vsa_create(context, options) + except exception.Error: + raise exception.ApiError(_(sys.exc_info()[1])) + vsa_id = vsa_ref['id'] + vsa_name = vsa_ref['name'] + + # check storage parameters + try: + volume_params = self._check_storage_parameters(context, vsa_name, + storage, shared) + except exception.ApiError: + self.update_vsa_status(context, vsa_id, + status=FLAGS.vsa_status_failed) + raise + + # after creating DB entry, re-check and set some defaults + updates = {} + if (not hasattr(vsa_ref, 'display_name') or + vsa_ref.display_name is None or + vsa_ref.display_name == ''): + updates['display_name'] = display_name = vsa_name + updates['vol_count'] = len(volume_params) + vsa_ref = self.update(context, vsa_id, **updates) + + # create volumes + if FLAGS.vsa_multi_vol_creation: + if len(volume_params) > 0: + #filter_class = 'nova.scheduler.vsa.InstanceTypeFilter' + request_spec = { + 'num_volumes': len(volume_params), + 'vsa_id': vsa_id, + 'volumes': volume_params, + #'filter': filter_class, + } + + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "create_volumes", + "args": {"topic": FLAGS.volume_topic, + "request_spec": request_spec, + "availability_zone": availability_zone}}) + else: + # create BE volumes one-by-one + for vol in volume_params: + try: + vol_name = vol['name'] + vol_size = vol['size'] + LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\ + "volume %(vol_name)s, %(vol_size)d GB"), + locals()) + + vol_ref = self.volume_api.create(context, + vol_size, + vol['snapshot_id'], + vol_name, + vol['description'], + to_vsa_id=vsa_id, + drive_type_id=vol['drive_ref'].get('id'), + availability_zone=availability_zone) + except: + self.update_vsa_status(context, vsa_id, + status=FLAGS.vsa_status_partial) + raise + + if len(volume_params) == 0: + # No BE volumes - ask VSA manager to start VCs + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "create_vsa", + "args": {"vsa_id": vsa_id}}) + + return vsa_ref + + def update_vsa_status(self, context, vsa_id, status): + updates = dict(status=status) + LOG.info(_("VSA ID %(vsa_id)d: Update VSA status to %(status)s"), + locals()) + return self.update(context, vsa_id, **updates) + + def 
update(self, context, vsa_id, **kwargs):
+        """Updates the VSA instance in the datastore.
+
+        :param context: The security context
+        :param vsa_id: ID of the VSA instance to update
+        :param kwargs: All additional keyword args are treated
+                       as data fields of the instance to be
+                       updated
+
+        :returns: None
+        """
+        LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals())
+
+        vc_count = kwargs.get('vc_count', None)
+        if vc_count is not None:
+            # VP-TODO: This request may want to update number of VCs
+            # Get number of current VCs and add/delete VCs appropriately
+            vsa = self.get(context, vsa_id)
+            vc_count = int(vc_count)
+            if vsa['vc_count'] != vc_count:
+                self.update_num_vcs(context, vsa, vc_count)
+
+        return self.db.vsa_update(context, vsa_id, kwargs)
+
+    def update_num_vcs(self, context, vsa, vc_count):
+        if vc_count > FLAGS.max_vcs_in_vsa:
+            LOG.warning(_("Requested number of VCs (%d) is too high."\
+                          " Setting to default"), vc_count)
+            vc_count = FLAGS.max_vcs_in_vsa
+
+        old_vc_count = vsa['vc_count']
+        if vc_count > old_vc_count:
+            LOG.debug(_("Adding %d VCs to VSA %s."),
+                      vc_count - old_vc_count, vsa['name'])
+            # VP-TODO: actual code for adding new VCs
+
+        elif vc_count < old_vc_count:
+            LOG.debug(_("Deleting %d VCs from VSA %s."),
+                      old_vc_count - vc_count, vsa['name'])
+            # VP-TODO: actual code for deleting extra VCs
+
+    def _force_volume_delete(self, ctxt, volume):
+        """Delete a volume, bypassing the check that it must be available."""
+        host = volume['host']
+
+        if not host:
+            # Volume not yet assigned to host
+            # Deleting volume from database and skipping rpc.
+            self.db.volume_destroy(ctxt, volume['id'])
+            return
+
+        rpc.cast(ctxt,
+                 self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
+                 {"method": "delete_volume",
+                  "args": {"volume_id": volume['id']}})
+
+    def delete_be_volumes(self, context, vsa_id, force_delete=True):
+
+        be_volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
+        for volume in be_volumes:
+            try:
+                vol_name = volume['name']
+                LOG.info(_("VSA ID %(vsa_id)s: Deleting BE volume "\
+                           "%(vol_name)s"), locals())
+                self.volume_api.delete(context, volume['id'])
+            except exception.ApiError:
+                LOG.info(_("Unable to delete volume %s"), volume['name'])
+                if force_delete:
+                    LOG.info(_("VSA ID %(vsa_id)s: Forced delete. 
BE volume "\ + "%(vol_name)s"), locals()) + self._force_volume_delete(context, volume) + + def delete(self, context, vsa_id): + """Terminate a VSA instance.""" + LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id) + + # allow deletion of volumes in "abnormal" state + + # Delete all FE volumes + fe_volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id) + for volume in fe_volumes: + try: + vol_name = volume['name'] + LOG.info(_("VSA ID %(vsa_id)s: Deleting FE volume "\ + "%(vol_name)s"), locals()) + self.volume_api.delete(context, volume['id']) + except exception.ApiError: + LOG.info(_("Unable to delete volume %s"), volume['name']) + + # Delete all BE volumes + self.delete_be_volumes(context, vsa_id, force_delete=True) + + # Delete all VC instances + instances = self.db.instance_get_all_by_vsa(context, vsa_id) + for instance in instances: + name = instance['name'] + LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"), + locals()) + self.compute_api.delete(context, instance['id']) + + # Delete VSA instance + self.db.vsa_destroy(context, vsa_id) + + def get(self, context, vsa_id): + rv = self.db.vsa_get(context, vsa_id) + return rv + + def get_all(self, context): + if context.is_admin: + return self.db.vsa_get_all(context) + return self.db.vsa_get_all_by_project(context, context.project_id) + + def generate_user_data(self, context, vsa, volumes): + e_vsa = Element("vsa") + + e_vsa_detail = SubElement(e_vsa, "id") + e_vsa_detail.text = str(vsa['id']) + e_vsa_detail = SubElement(e_vsa, "name") + e_vsa_detail.text = vsa['display_name'] + e_vsa_detail = SubElement(e_vsa, "description") + e_vsa_detail.text = vsa['display_description'] + e_vsa_detail = SubElement(e_vsa, "vc_count") + e_vsa_detail.text = str(vsa['vc_count']) + + e_volumes = SubElement(e_vsa, "volumes") + for volume in volumes: + + loc = volume['provider_location'] + if loc is None: + ip = '' + iscsi_iqn = '' + iscsi_portal = '' + else: + (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ") + (ip, iscsi_portal) = iscsi_target.split(":", 1) + + e_vol = SubElement(e_volumes, "volume") + e_vol_detail = SubElement(e_vol, "id") + e_vol_detail.text = str(volume['id']) + e_vol_detail = SubElement(e_vol, "name") + e_vol_detail.text = volume['name'] + e_vol_detail = SubElement(e_vol, "display_name") + e_vol_detail.text = volume['display_name'] + e_vol_detail = SubElement(e_vol, "size_gb") + e_vol_detail.text = str(volume['size']) + e_vol_detail = SubElement(e_vol, "status") + e_vol_detail.text = volume['status'] + e_vol_detail = SubElement(e_vol, "ip") + e_vol_detail.text = ip + e_vol_detail = SubElement(e_vol, "iscsi_iqn") + e_vol_detail.text = iscsi_iqn + e_vol_detail = SubElement(e_vol, "iscsi_portal") + e_vol_detail.text = iscsi_portal + e_vol_detail = SubElement(e_vol, "lun") + e_vol_detail.text = '0' + e_vol_detail = SubElement(e_vol, "sn_host") + e_vol_detail.text = volume['host'] + + _xml = ElementTree.tostring(e_vsa) + return base64.b64encode(_xml) diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py new file mode 100644 index 000000000..6c61acee4 --- /dev/null +++ b/nova/vsa/connection.py @@ -0,0 +1,25 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Abstraction of the underlying connection to VC."""
+
+from nova.vsa import fake
+
+
+def get_connection():
+    """Return an object that is able to talk to VCs."""
+    return fake.FakeVcConnection()
diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py
new file mode 100644
index 000000000..308d21fec
--- /dev/null
+++ b/nova/vsa/fake.py
@@ -0,0 +1,22 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class FakeVcConnection(object):
+
+    def init_host(self, host):
+        pass
diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py
new file mode 100644
index 000000000..a9a9fa2e8
--- /dev/null
+++ b/nova/vsa/manager.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Zadara Storage Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all processes relating to Virtual Storage Arrays (VSA).
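+
+The manager is driven over RPC: nova.vsa.api casts "create_vsa" when a
+VSA is created with no BE volumes, and the volume side is expected to
+report back via "vsa_volume_created" as each BE volume completes. A
+minimal sketch of the first cast, as issued by the API layer elsewhere
+in this change:
+
+    rpc.cast(context,
+             FLAGS.vsa_topic,
+             {"method": "create_vsa",
+              "args": {"vsa_id": vsa_id}})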
+ +**Related Flags** + +""" + +from nova import log as logging +from nova import manager +from nova import flags +from nova import utils +from nova import exception +from nova import compute +from nova import volume +from nova import vsa +from nova.compute import instance_types + + +FLAGS = flags.FLAGS +flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection', + 'Driver to use for controlling VSAs') + +LOG = logging.getLogger('nova.vsa.manager') + + +class VsaManager(manager.SchedulerDependentManager): + """Manages Virtual Storage Arrays (VSAs).""" + + def __init__(self, vsa_driver=None, *args, **kwargs): + if not vsa_driver: + vsa_driver = FLAGS.vsa_driver + self.driver = utils.import_object(vsa_driver) + self.compute_manager = utils.import_object(FLAGS.compute_manager) + + self.compute_api = compute.API() + self.volume_api = volume.API() + self.vsa_api = vsa.API() + + super(VsaManager, self).__init__(*args, **kwargs) + + def init_host(self): + self.driver.init_host(host=self.host) + super(VsaManager, self).init_host() + + @exception.wrap_exception() + def create_vsa(self, context, vsa_id): + """Called by API if there were no BE volumes assigned""" + LOG.debug(_("Create call received for VSA %s"), vsa_id) + + vsa_id = int(vsa_id) # just in case + + try: + vsa = self.vsa_api.get(context, vsa_id) + except Exception as ex: + msg = _("Failed to find VSA %(vsa_id)d") % locals() + LOG.exception(msg) + return + + return self._start_vcs(context, vsa) + + @exception.wrap_exception() + def vsa_volume_created(self, context, vol_id, vsa_id, status): + """Callback for volume creations""" + LOG.debug(_("VSA ID %(vsa_id)s: Volume %(vol_id)s created. "\ + "Status %(status)s"), locals()) + vsa_id = int(vsa_id) # just in case + + # Get all volumes for this VSA + # check if any of them still in creating phase + volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) + for volume in volumes: + if volume['status'] == 'creating': + vol_name = volume['name'] + vol_disp_name = volume['display_name'] + LOG.debug(_("Volume %(vol_name)s (%(vol_disp_name)s) still "\ + "in creating phase - wait"), locals()) + return + + try: + vsa = self.vsa_api.get(context, vsa_id) + except Exception as ex: + msg = _("Failed to find VSA %(vsa_id)d") % locals() + LOG.exception(msg) + return + + if len(volumes) != vsa['vol_count']: + LOG.debug(_("VSA ID %d: Not all volumes are created (%d of %d)"), + vsa_id, len(volumes), vsa['vol_count']) + return + + # all volumes created (successfully or not) + return self._start_vcs(context, vsa, volumes) + + def _start_vcs(self, context, vsa, volumes=[]): + """Start VCs for VSA """ + + vsa_id = vsa['id'] + if vsa['status'] == FLAGS.vsa_status_creating: + self.vsa_api.update_vsa_status(context, vsa_id, + FLAGS.vsa_status_launching) + else: + return + + # in _separate_ loop go over all volumes and mark as "attached" + has_failed_volumes = False + for volume in volumes: + vol_name = volume['name'] + vol_disp_name = volume['display_name'] + status = volume['status'] + LOG.info(_("VSA ID %(vsa_id)d: Volume %(vol_name)s "\ + "(%(vol_disp_name)s) is in %(status)s state"), + locals()) + if status == 'available': + try: + # self.volume_api.update(context, volume['id'], + # dict(attach_status="attached")) + pass + except Exception as ex: + msg = _("Failed to update attach status for volume " + "%(vol_name)s. 
%(ex)s") % locals() + LOG.exception(msg) + else: + has_failed_volumes = True + + if has_failed_volumes: + LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) + self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True) + self.vsa_api.update_vsa_status(context, vsa_id, + FLAGS.vsa_status_failed) + return + + # create user-data record for VC + storage_data = self.vsa_api.generate_user_data(context, vsa, volumes) + + instance_type = instance_types.get_instance_type( + vsa['instance_type_id']) + + # now start the VC instance + + vc_count = vsa['vc_count'] + LOG.info(_("VSA ID %(vsa_id)d: Start %(vc_count)d instances"), + locals()) + vc_instances = self.compute_api.create(context, + instance_type, # vsa['vsa_instance_type'], + vsa['image_ref'], + min_count=1, + max_count=vc_count, + display_name='vc-' + vsa['display_name'], + display_description='VC for VSA ' + vsa['display_name'], + availability_zone=vsa['availability_zone'], + user_data=storage_data, + vsa_id=vsa_id) + + self.vsa_api.update_vsa_status(context, vsa_id, + FLAGS.vsa_status_created) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent old mode 100755 new mode 100644 diff --git a/tools/clean-vlans b/tools/clean-vlans old mode 100755 new mode 100644 diff --git a/tools/nova-debug b/tools/nova-debug old mode 100755 new mode 100644 -- cgit From f6844960dd062154244c706283cf1916ee7194ff Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 15 Jul 2011 18:11:13 -0700 Subject: added missing instance_get_all_by_vsa --- nova/db/api.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/db/api.py b/nova/db/api.py index 9147f136b..fde229099 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -522,6 +522,11 @@ def instance_get_all_by_host(context, host): return IMPL.instance_get_all_by_host(context, host) +def instance_get_all_by_vsa(context, vsa_id): + """Get all instance belonging to a VSA.""" + return IMPL.instance_get_all_by_vsa(context, vsa_id) + + def instance_get_all_by_reservation(context, reservation_id): """Get all instance belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) -- cgit From d340d7e90e245c79182906d603aec57d086cca1f Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 15 Jul 2011 18:25:37 -0700 Subject: added missing drive_types.py --- nova/vsa/drive_types.py | 106 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100644 nova/vsa/drive_types.py diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py new file mode 100644 index 000000000..b8cb66b22 --- /dev/null +++ b/nova/vsa/drive_types.py @@ -0,0 +1,106 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Handles all requests relating to Virtual Storage Arrays (VSAs). 
+""" + +from nova import db +from nova import exception +from nova import flags +from nova import log as logging +from nova import quota +from nova import rpc +from nova.db import base + +from nova import compute +from nova import volume +from nova.compute import instance_types + + +FLAGS = flags.FLAGS +flags.DEFINE_string('drive_type_template_short', '%s_%sGB_%sRPM', + 'Template string for generation of drive type name') +flags.DEFINE_string('drive_type_template_long', '%s_%sGB_%sRPM_%s', + 'Template string for generation of drive type name') + + +LOG = logging.getLogger('nova.drive_types') + + +def _generate_default_drive_name(type, size_gb, rpm, capabilities): + if capabilities is None or capabilities == '': + return FLAGS.drive_type_template_short % \ + (type, str(size_gb), rpm) + else: + return FLAGS.drive_type_template_long % \ + (type, str(size_gb), rpm, capabilities) + + +def drive_type_create(context, type, size_gb, rpm, + capabilities='', visible=True, name=None): + if name is None: + name = _generate_default_drive_name(type, size_gb, rpm, + capabilities) + LOG.debug(_("Creating drive type %(name)s: "\ + "%(type)s %(size_gb)s %(rpm)s %(capabilities)s"), locals()) + + values = { + 'type': type, + 'size_gb': size_gb, + 'rpm': rpm, + 'capabilities': capabilities, + 'visible': visible, + 'name': name + } + return db.drive_type_create(context, values) + + +def drive_type_update(context, name, **kwargs): + LOG.debug(_("Updating drive type %(name)s: "), locals()) + return db.drive_type_update(context, name, kwargs) + + +def drive_type_rename(context, name, new_name=None): + + if new_name is None or \ + new_name == '': + disk = db.drive_type_get_by_name(context, name) + new_name = _generate_default_drive_name(disk['type'], + disk['size_gb'], disk['rpm'], disk['capabilities']) + + LOG.debug(_("Renaming drive type %(name)s to %(new_name)s"), locals()) + + values = dict(name=new_name) + return db.drive_type_update(context, name, values) + + +def drive_type_delete(context, name): + LOG.debug(_("Deleting drive type %(name)s"), locals()) + db.drive_type_destroy(context, name) + + +def drive_type_get(context, id): + return db.drive_type_get(context, id) + + +def drive_type_get_by_name(context, name): + return db.drive_type_get_by_name(context, name) + + +def drive_type_get_all(context, visible=None): + return db.drive_type_get_all(context, visible) -- cgit From cc7c1c49cb15d39445e94c248697d62f63a014a7 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 18 Jul 2011 08:59:00 -0700 Subject: Added auth info to XML --- nova/vsa/api.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/nova/vsa/api.py b/nova/vsa/api.py index ed83ff563..853816477 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -368,6 +368,10 @@ class API(base.Base): e_vsa_detail.text = vsa['display_description'] e_vsa_detail = SubElement(e_vsa, "vc_count") e_vsa_detail.text = str(vsa['vc_count']) + e_vsa_detail = SubElement(e_vsa, "auth_user") + e_vsa_detail.text = str(context.user.name) + e_vsa_detail = SubElement(e_vsa, "auth_access_key") + e_vsa_detail.text = str(context.user.access) e_volumes = SubElement(e_vsa, "volumes") for volume in volumes: -- cgit From 15bbaf8bbdd48231f9ce98e4d8867b0477b44645 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 18 Jul 2011 09:57:31 -0700 Subject: localization changes. Removed vsa params from volume cloud API. 
Alex changes

---
 nova/api/ec2/cloud.py   | 19 ++-----------------
 nova/scheduler/vsa.py   |  8 ++++----
 nova/vsa/api.py         | 11 +++++++----
 nova/vsa/drive_types.py |  8 --------
 nova/vsa/manager.py     |  6 ++++--
 5 files changed, 17 insertions(+), 35 deletions(-)

diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 786ceaccc..e31b755de 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -730,26 +730,12 @@ class CloudController(object):
             snapshot_id = None
 
         LOG.audit(_("Create volume of %s GB"), size, context=context)
-        to_vsa_id = kwargs.get('to_vsa_id', None)
-        if to_vsa_id:
-            to_vsa_id = ec2utils.ec2_id_to_id(to_vsa_id)
-
-        from_vsa_id = kwargs.get('from_vsa_id', None)
-        if from_vsa_id:
-            from_vsa_id = ec2utils.ec2_id_to_id(from_vsa_id)
-
-        if to_vsa_id or from_vsa_id:
-            LOG.audit(_("Create volume of %s GB associated with VSA "\
-                        "(to: %d, from: %d)"),
-                      size, to_vsa_id, from_vsa_id, context=context)
-
         volume = self.volume_api.create(
                 context,
                 size=size,
                 snapshot_id=snapshot_id,
                 name=kwargs.get('display_name'),
-                description=kwargs.get('display_description'),
-                to_vsa_id=to_vsa_id, from_vsa_id=from_vsa_id)
+                description=kwargs.get('display_description'))
         # TODO(vish): Instance should be None at db layer instead of
         #             trying to lazy load, but for now we turn it into
         #             a dict to avoid an error.
@@ -864,8 +850,7 @@ class CloudController(object):
     def describe_vsas(self, context, vsa_id=None, status=None,
                       availability_zone=None, **kwargs):
-#        LOG.debug(_("vsa_id=%s, status=%s, az=%s"),
-#            (vsa_id, status, availability_zone))
+        LOG.audit(_("Describe VSAs"))
 
         result = []
         vsas = []
         if vsa_id is not None:
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
index 4277c0ba8..260545746 100644
--- a/nova/scheduler/vsa.py
+++ b/nova/scheduler/vsa.py
@@ -96,8 +96,8 @@ class VsaScheduler(simple.SimpleScheduler):
                cap['cmp_func'] is not None and \
                cap['cmp_func'](qos_values[cap['cap1']],
                                drive_type[cap['cap2']]):
-                # LOG.debug(_("One of required capabilities found: %s:%s"),
-                #             cap['cap1'], drive_type[cap['cap2']])
+                # LOG.debug(("One of required capabilities found: %s:%s"),
+                #            cap['cap1'], drive_type[cap['cap2']])
                 pass
             else:
                 return False
@@ -416,8 +416,8 @@ class VsaScheduler(simple.SimpleScheduler):
             drive_type = dict(drive_type)
 
         # otherwise - drive type is loaded
-        LOG.debug(_("Spawning volume %d with drive type %s"),
-                  volume_ref['id'], drive_type)
+        LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
+                    "%(drive_type)s"), locals())
 
         LOG.debug(_("Service states BEFORE %s"),
                   self.zone_manager.service_states)
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index 853816477..7ce643aab 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -275,15 +275,18 @@ class API(base.Base):
                  " Setting to default"), vc_count)
             vc_count = FLAGS.max_vcs_in_vsa
 
+        vsa_name = vsa['name']
         old_vc_count = vsa['vc_count']
         if vc_count > old_vc_count:
-            LOG.debug(_("Adding %d VCs to VSA %s."),
-                        (vc_count - old_vc_count, vsa['name']))
+            add_cnt = vc_count - old_vc_count
+            LOG.debug(_("Adding %(add_cnt)d VCs to VSA %(vsa_name)s."),
+                      locals())
             # VP-TODO: actual code for adding new VCs
 
         elif vc_count < old_vc_count:
-            LOG.debug(_("Deleting %d VCs from VSA %s."),
-                        (old_vc_count - vc_count, vsa['name']))
+            del_cnt = old_vc_count - vc_count
+            LOG.debug(_("Deleting %(del_cnt)d VCs from VSA %(vsa_name)s."),
+                      locals())
             # VP-TODO: actual code for deleting extra VCs
 
     def _force_volume_delete(self, ctxt, volume):
diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py
index b8cb66b22..781206cdf 100644
--- 
a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -23,14 +23,6 @@ from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import quota -from nova import rpc -from nova.db import base - -from nova import compute -from nova import volume -from nova.compute import instance_types - FLAGS = flags.FLAGS flags.DEFINE_string('drive_type_template_short', '%s_%sGB_%sRPM', diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index a9a9fa2e8..c67358672 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -101,8 +101,10 @@ class VsaManager(manager.SchedulerDependentManager): return if len(volumes) != vsa['vol_count']: - LOG.debug(_("VSA ID %d: Not all volumes are created (%d of %d)"), - vsa_id, len(volumes), vsa['vol_count']) + cvol_real = len(volumes) + cvol_exp = vsa['vol_count'] + LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\ + "(%(cvol_real)d of %(cvol_exp)d)"), locals()) return # all volumes created (successfully or not) -- cgit From 3983bca4c9528d286b4e154956ceb749b4875274 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 18 Jul 2011 14:00:19 -0700 Subject: VSA schedulers reorg --- bin/nova-api | 0 bin/nova-logspool | 0 bin/nova-manage | 2 + bin/nova-spoolsentry | 0 bin/nova-vncproxy | 0 nova/scheduler/vsa.py | 267 ++++++++++++++++++++++------------------- nova/scheduler/zone_manager.py | 4 +- 7 files changed, 149 insertions(+), 124 deletions(-) mode change 100644 => 100755 bin/nova-api mode change 100644 => 100755 bin/nova-logspool mode change 100644 => 100755 bin/nova-spoolsentry mode change 100644 => 100755 bin/nova-vncproxy diff --git a/bin/nova-api b/bin/nova-api old mode 100644 new mode 100755 diff --git a/bin/nova-logspool b/bin/nova-logspool old mode 100644 new mode 100755 diff --git a/bin/nova-manage b/bin/nova-manage index 4cf27ec8c..63db4ca56 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1009,6 +1009,8 @@ class VsaDriveTypeCommands(object): """Methods for dealing with VSA drive types""" def __init__(self, *args, **kwargs): + self.controller = cloud.CloudController() + self.manager = manager.AuthManager() super(VsaDriveTypeCommands, self).__init__(*args, **kwargs) def _list(self, drives): diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry old mode 100644 new mode 100755 diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy old mode 100644 new mode 100755 diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 260545746..c6517d9d5 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -41,16 +41,22 @@ flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, flags.DEFINE_boolean('vsa_select_unique_drives', True, 'Allow selection of same host for multiple drives') +def BYTES_TO_GB(bytes): + return bytes >> FLAGS.gb_to_bytes_shift + +def GB_TO_BYTES(gb): + return gb << FLAGS.gb_to_bytes_shift + class VsaScheduler(simple.SimpleScheduler): - """Implements Naive Scheduler that tries to find least loaded host.""" + """Implements Scheduler for volume placement.""" def __init__(self, *args, **kwargs): super(VsaScheduler, self).__init__(*args, **kwargs) self._notify_all_volume_hosts("startup") def _notify_all_volume_hosts(self, event): - rpc.cast(context.get_admin_context(), + rpc.fanout_cast(context.get_admin_context(), FLAGS.volume_topic, {"method": "notification", "args": {"event": event}}) @@ -62,7 +68,7 @@ class VsaScheduler(simple.SimpleScheduler): return result def _compare_sizes_exact_match(self, cap_capacity, size_gb): - cap_capacity = int(cap_capacity) >> 
FLAGS.gb_to_bytes_shift + cap_capacity = BYTES_TO_GB(int(cap_capacity)) size_gb = int(size_gb) result = cap_capacity == size_gb # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\ @@ -70,7 +76,7 @@ class VsaScheduler(simple.SimpleScheduler): return result def _compare_sizes_approxim(self, cap_capacity, size_gb): - cap_capacity = int(cap_capacity) >> FLAGS.gb_to_bytes_shift + cap_capacity = BYTES_TO_GB(int(cap_capacity)) size_gb = int(size_gb) size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100 @@ -106,7 +112,7 @@ class VsaScheduler(simple.SimpleScheduler): def _filter_hosts(self, topic, request_spec, host_list=None): drive_type = request_spec['drive_type'] - LOG.debug(_("Filter hosts for drive type %(drive_type)s") % locals()) + LOG.debug(_("Filter hosts for drive type %s"), drive_type['name']) if host_list is None: host_list = self.zone_manager.service_states.iteritems() @@ -121,14 +127,15 @@ class VsaScheduler(simple.SimpleScheduler): for qosgrp, qos_values in gos_info.iteritems(): if self._qosgrp_match(drive_type, qos_values): if qos_values['AvailableCapacity'] > 0: - LOG.debug(_("Adding host %s to the list"), host) + # LOG.debug(_("Adding host %s to the list"), host) filtered_hosts.append((host, gos_info)) else: LOG.debug(_("Host %s has no free capacity. Skip"), host) break - LOG.debug(_("Found hosts %(filtered_hosts)s") % locals()) + host_names = [item[0] for item in filtered_hosts] + LOG.debug(_("Filter hosts: %s"), host_names) return filtered_hosts def _allowed_to_use_host(self, host, selected_hosts, unique): @@ -142,104 +149,13 @@ class VsaScheduler(simple.SimpleScheduler): if host not in [item[0] for item in selected_hosts]: selected_hosts.append((host, cap)) - def _alg_least_used_host(self, request_spec, all_hosts, selected_hosts): - size = request_spec['size'] - drive_type = request_spec['drive_type'] - best_host = None - best_qoscap = None - best_cap = None - min_used = 0 - - LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ - "%(drive_type)s from %(all_hosts)s"), locals()) - - for (host, capabilities) in all_hosts: - has_enough_capacity = False - used_capacity = 0 - for qosgrp, qos_values in capabilities.iteritems(): - - used_capacity = used_capacity + qos_values['TotalCapacity'] \ - - qos_values['AvailableCapacity'] - - if self._qosgrp_match(drive_type, qos_values): - # we found required qosgroup - - if size == 0: # full drive match - if qos_values['FullDrive']['NumFreeDrives'] > 0: - has_enough_capacity = True - matched_qos = qos_values - else: - break - else: - if qos_values['AvailableCapacity'] >= size and \ - (qos_values['PartitionDrive'][ - 'NumFreePartitions'] > 0 or \ - qos_values['FullDrive']['NumFreeDrives'] > 0): - has_enough_capacity = True - matched_qos = qos_values - else: - break - - if has_enough_capacity and \ - self._allowed_to_use_host(host, - selected_hosts, - unique) and \ - (best_host is None or used_capacity < min_used): - - min_used = used_capacity - best_host = host - best_qoscap = matched_qos - best_cap = capabilities - - if best_host: - self._add_hostcap_to_list(selected_hosts, host, best_cap) - LOG.debug(_("Best host found: %(best_host)s. 
"\ - "(used capacity %(min_used)s)"), locals()) - return (best_host, best_qoscap) - - def _alg_most_avail_capacity(self, request_spec, all_hosts, + def host_selection_algorithm(self, request_spec, all_hosts, selected_hosts, unique): - size = request_spec['size'] - drive_type = request_spec['drive_type'] - best_host = None - best_qoscap = None - best_cap = None - max_avail = 0 - - LOG.debug(_("Selecting best host for %(size)sGB volume of type "\ - "%(drive_type)s from %(all_hosts)s"), locals()) - - for (host, capabilities) in all_hosts: - for qosgrp, qos_values in capabilities.iteritems(): - if self._qosgrp_match(drive_type, qos_values): - # we found required qosgroup - - if size == 0: # full drive match - available = qos_values['FullDrive']['NumFreeDrives'] - else: - available = qos_values['AvailableCapacity'] - - if available > max_avail and \ - self._allowed_to_use_host(host, - selected_hosts, - unique): - max_avail = available - best_host = host - best_qoscap = qos_values - best_cap = capabilities - break # go to the next host - - if best_host: - self._add_hostcap_to_list(selected_hosts, host, best_cap) - LOG.debug(_("Best host found: %(best_host)s. "\ - "(available capacity %(max_avail)s)"), locals()) - - return (best_host, best_qoscap) + """Must override this method for VSA scheduler to work.""" + raise NotImplementedError(_("Must implement host selection mechanism")) def _select_hosts(self, request_spec, all_hosts, selected_hosts=None): - #self._alg_most_avail_capacity(request_spec, all_hosts, selected_hosts) - if selected_hosts is None: selected_hosts = [] @@ -249,7 +165,7 @@ class VsaScheduler(simple.SimpleScheduler): LOG.debug(_("Maximum number of hosts selected (%d)"), len(selected_hosts)) unique = False - (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + (host, qos_cap) = self.host_selection_algorithm(request_spec, selected_hosts, selected_hosts, unique) @@ -262,12 +178,10 @@ class VsaScheduler(simple.SimpleScheduler): # if we've not tried yet (# of sel hosts < max) - unique=True # or failed to select from selected_hosts - unique=False # select from all hosts - (host, qos_cap) = self._alg_most_avail_capacity(request_spec, + (host, qos_cap) = self.host_selection_algorithm(request_spec, all_hosts, selected_hosts, unique) - LOG.debug(_("Selected host %(host)s"), locals()) - if host is None: raise driver.WillNotSchedule(_("No available hosts")) @@ -329,8 +243,11 @@ class VsaScheduler(simple.SimpleScheduler): LOG.debug(_("volume_params %(volume_params)s") % locals()) + i = 1 for vol in volume_params: - LOG.debug(_("Assigning host to volume %s") % vol['name']) + name = vol['name'] + LOG.debug(_("%(i)d: Volume %(name)s"), locals()) + i += 1 if forced_host: vol['host'] = forced_host @@ -352,22 +269,19 @@ class VsaScheduler(simple.SimpleScheduler): vol['capabilities'] = qos_cap self._consume_resource(qos_cap, vol['size'], -1) - LOG.debug(_("Assigned host %(host)s, capabilities %(qos_cap)s"), - locals()) - - LOG.debug(_("END: volume_params %(volume_params)s") % locals()) + # LOG.debug(_("Volume %(name)s assigned to host %(host)s"), locals()) def schedule_create_volumes(self, context, request_spec, availability_zone, *_args, **_kwargs): """Picks hosts for hosting multiple volumes.""" + LOG.debug(_("Service states BEFORE %s"), + self.zone_manager.service_states) + num_volumes = request_spec.get('num_volumes') LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") % locals()) - LOG.debug(_("Service states BEFORE %s"), - self.zone_manager.service_states) - vsa_id = 
request_spec.get('vsa_id') volume_params = request_spec.get('volumes') @@ -381,7 +295,6 @@ class VsaScheduler(simple.SimpleScheduler): LOG.debug(_("Service states AFTER %s"), self.zone_manager.service_states) - except: if vsa_id: db.vsa_update(context, vsa_id, @@ -415,13 +328,12 @@ class VsaScheduler(simple.SimpleScheduler): volume_id, *_args, **_kwargs) drive_type = dict(drive_type) - # otherwise - drive type is loaded - LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ - "%(drive_type)s"), locals()) - LOG.debug(_("Service states BEFORE %s"), self.zone_manager.service_states) + LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ + "%(drive_type)s"), locals()) + request_spec = {'size': volume_ref['size'], 'drive_type': drive_type} hosts = self._filter_hosts("volume", request_spec) @@ -487,9 +399,118 @@ class VsaScheduler(simple.SimpleScheduler): qos_values['DriveCapacity'] self._consume_full_drive(qos_values, direction) else: - qos_values['AvailableCapacity'] += direction * \ - (size << FLAGS.gb_to_bytes_shift) - self._consume_partition(qos_values, - size << FLAGS.gb_to_bytes_shift, - direction) + qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size) + self._consume_partition(qos_values, GB_TO_BYTES(size), direction) return + + +class VsaSchedulerLeastUsedHost(VsaScheduler): + """ + Implements VSA scheduler to select the host with least used capacity + of particular type. + """ + + def __init__(self, *args, **kwargs): + super(VsaSchedulerLeastUsedHost, self).__init__(*args, **kwargs) + + def host_selection_algorithm(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + min_used = 0 + + for (host, capabilities) in all_hosts: + + has_enough_capacity = False + used_capacity = 0 + for qosgrp, qos_values in capabilities.iteritems(): + + used_capacity = used_capacity + qos_values['TotalCapacity'] \ + - qos_values['AvailableCapacity'] + + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + if qos_values['FullDrive']['NumFreeDrives'] > 0: + has_enough_capacity = True + matched_qos = qos_values + else: + break + else: + if qos_values['AvailableCapacity'] >= size and \ + (qos_values['PartitionDrive'][ + 'NumFreePartitions'] > 0 or \ + qos_values['FullDrive']['NumFreeDrives'] > 0): + has_enough_capacity = True + matched_qos = qos_values + else: + break + + if has_enough_capacity and \ + self._allowed_to_use_host(host, + selected_hosts, + unique) and \ + (best_host is None or used_capacity < min_used): + + min_used = used_capacity + best_host = host + best_qoscap = matched_qos + best_cap = capabilities + + if best_host: + self._add_hostcap_to_list(selected_hosts, best_host, best_cap) + min_used = BYTES_TO_GB(min_used) + LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "\ + "(used capacity %(min_used)s)"), locals()) + return (best_host, best_qoscap) + + +class VsaSchedulerMostAvailCapacity(VsaScheduler): + """ + Implements VSA scheduler to select the host with most available capacity + of one particular type. 
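+
+    For example (values illustrative only): if one host reports 40 free
+    drives in the matching qosgroup and another reports 25, a full-drive
+    request (size == 0) selects the first; a partition request compares
+    AvailableCapacity in bytes instead of the free-drive count.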
+ """ + + def __init__(self, *args, **kwargs): + super(VsaSchedulerMostAvailCapacity, self).__init__(*args, **kwargs) + + def host_selection_algorithm(self, request_spec, all_hosts, + selected_hosts, unique): + size = request_spec['size'] + drive_type = request_spec['drive_type'] + best_host = None + best_qoscap = None + best_cap = None + max_avail = 0 + + for (host, capabilities) in all_hosts: + for qosgrp, qos_values in capabilities.iteritems(): + if self._qosgrp_match(drive_type, qos_values): + # we found required qosgroup + + if size == 0: # full drive match + available = qos_values['FullDrive']['NumFreeDrives'] + else: + available = qos_values['AvailableCapacity'] + + if available > max_avail and \ + self._allowed_to_use_host(host, + selected_hosts, + unique): + max_avail = available + best_host = host + best_qoscap = qos_values + best_cap = capabilities + break # go to the next host + + if best_host: + self._add_hostcap_to_list(selected_hosts, best_host, best_cap) + type_str = "drives" if size == 0 else "bytes" + LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "\ + "(available %(max_avail)s %(type_str)s)"), locals()) + + return (best_host, best_qoscap) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index efdac06e1..b23bdbf85 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -196,8 +196,10 @@ class ZoneManager(object): def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" + # logging.debug(_("Received %(service_name)s service update from " + # "%(host)s: %(capabilities)s") % locals()) logging.debug(_("Received %(service_name)s service update from " - "%(host)s: %(capabilities)s") % locals()) + "%(host)s") % locals()) service_caps = self.service_states.get(host, {}) capabilities["timestamp"] = utils.utcnow() # Reported time service_caps[service_name] = capabilities -- cgit From 9e74803d5eb8a70ba829ac0569f1cd6cd372a6f2 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 22 Jul 2011 15:14:29 -0700 Subject: Reverted volume driver part --- bin/nova-vsa | 49 ++++ nova/api/ec2/cloud.py | 19 +- nova/api/openstack/contrib/drive_types.py | 55 ++-- .../openstack/contrib/virtual_storage_arrays.py | 77 +++--- nova/db/api.py | 10 +- nova/db/sqlalchemy/api.py | 19 +- nova/scheduler/vsa.py | 5 +- .../api/openstack/contrib/test_drive_types.py | 192 +++++++++++++ nova/tests/api/openstack/contrib/test_vsa.py | 239 ++++++++++++++++ nova/tests/test_drive_types.py | 146 ++++++++++ nova/volume/driver.py | 220 +++++++++++++++ nova/volume/manager.py | 81 ++---- nova/volume/san.py | 308 --------------------- nova/vsa/api.py | 2 +- nova/vsa/drive_types.py | 27 +- 15 files changed, 987 insertions(+), 462 deletions(-) create mode 100755 bin/nova-vsa create mode 100644 nova/tests/api/openstack/contrib/test_drive_types.py create mode 100644 nova/tests/api/openstack/contrib/test_vsa.py create mode 100644 nova/tests/test_drive_types.py diff --git a/bin/nova-vsa b/bin/nova-vsa new file mode 100755 index 000000000..b15b7c7ed --- /dev/null +++ b/bin/nova-vsa @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Nova VSA.""" + +import eventlet +eventlet.monkey_patch() + +import gettext +import os +import sys + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('nova', unicode=1) + +from nova import flags +from nova import log as logging +from nova import service +from nova import utils + +if __name__ == '__main__': + utils.default_flagfile() + flags.FLAGS(sys.argv) + logging.setup() + service.serve() + service.wait() diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index e31b755de..7d0ce360f 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -889,12 +889,15 @@ class CloudController(object): "%(rpm)s %(capabilities)s %(visible)s"), locals()) - rv = drive_types.drive_type_create(context, type, size_gb, rpm, - capabilities, visible, name) + rv = drive_types.create(context, type, size_gb, rpm, + capabilities, visible, name) return {'driveTypeSet': [dict(rv)]} def update_drive_type(self, context, name, **kwargs): LOG.audit(_("Update Drive Type %s"), name) + + dtype = drive_types.get_by_name(context, name) + updatable_fields = ['type', 'size_gb', 'rpm', @@ -906,16 +909,18 @@ class CloudController(object): kwargs[field] is not None and \ kwargs[field] != '': changes[field] = kwargs[field] + if changes: - drive_types.drive_type_update(context, name, **changes) + drive_types.update(context, dtype['id'], **changes) return True def rename_drive_type(self, context, name, new_name): - drive_types.drive_type_rename(context, name, new_name) + drive_types.rename(context, name, new_name) return True def delete_drive_type(self, context, name): - drive_types.drive_type_delete(context, name) + dtype = drive_types.get_by_name(context, name) + drive_types.delete(context, dtype['id']) return True def describe_drive_types(self, context, names=None, visible=True): @@ -923,11 +928,11 @@ class CloudController(object): drives = [] if names is not None: for name in names: - drive = drive_types.drive_type_get_by_name(context, name) + drive = drive_types.get_by_name(context, name) if drive['visible'] == visible: drives.append(drive) else: - drives = drive_types.drive_type_get_all(context, visible) + drives = drive_types.get_all(context, visible) # VP-TODO: Change it later to EC2 compatible func (output) diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py index 85b3170cb..590eaaec0 100644 --- a/nova/api/openstack/contrib/drive_types.py +++ b/nova/api/openstack/contrib/drive_types.py @@ -21,6 +21,7 @@ from webob import exc from nova.vsa import drive_types +from nova import exception from nova import db from nova import quota from nova import log as logging @@ -32,6 +33,19 @@ from nova.api.openstack import wsgi LOG = logging.getLogger("nova.api.drive_types") +def _drive_type_view(drive): + 
"""Maps keys for drive types view.""" + d = {} + + d['id'] = drive['id'] + d['displayName'] = drive['name'] + d['type'] = drive['type'] + d['size'] = drive['size_gb'] + d['rpm'] = drive['rpm'] + d['capabilities'] = drive['capabilities'] + return d + + class DriveTypeController(object): """The Drive Type API controller for the OpenStack API.""" @@ -47,25 +61,13 @@ class DriveTypeController(object): "capabilities", ]}}} - def _drive_type_view(self, context, drive): - """Maps keys for drive types view.""" - d = {} - - d['id'] = drive['id'] - d['displayName'] = drive['name'] - d['type'] = drive['type'] - d['size'] = drive['size_gb'] - d['rpm'] = drive['rpm'] - d['capabilities'] = drive['capabilities'] - return d - def index(self, req): """Returns a list of drive types.""" context = req.environ['nova.context'] - drive_types = drive_types.drive_type_get_all(context) - limited_list = common.limited(drive_types, req) - res = [self._drive_type_view(context, drive) for drive in limited_list] + dtypes = drive_types.get_all(context) + limited_list = common.limited(dtypes, req) + res = [_drive_type_view(drive) for drive in limited_list] return {'drive_types': res} def show(self, req, id): @@ -73,11 +75,11 @@ class DriveTypeController(object): context = req.environ['nova.context'] try: - drive = drive_types.drive_type_get(context, id) + drive = drive_types.get(context, id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'drive_type': self._drive_type_view(context, drive)} + return {'drive_type': _drive_type_view(drive)} def create(self, req, body): """Creates a new drive type.""" @@ -97,14 +99,14 @@ class DriveTypeController(object): LOG.audit(_("Create drive type %(name)s for "\ "%(type)s:%(size)s:%(rpm)s"), locals(), context=context) - new_drive = drive_types.drive_type_create(context, - type=type, - size_gb=size, - rpm=rpm, - capabilities=capabilities, - name=name) + new_drive = drive_types.create(context, + type=type, + size_gb=size, + rpm=rpm, + capabilities=capabilities, + name=name) - return {'drive_type': self._drive_type_view(context, new_drive)} + return {'drive_type': _drive_type_view(new_drive)} def delete(self, req, id): """Deletes a drive type.""" @@ -113,11 +115,10 @@ class DriveTypeController(object): LOG.audit(_("Delete drive type with id: %s"), id, context=context) try: - drive = drive_types.drive_type_get(context, id) - drive_types.drive_type_delete(context, drive['name']) + drive_types.delete(context, id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + # return exc.HTTPAccepted() class Drive_types(extensions.ExtensionDescriptor): diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index eca2d68dd..3c1362f0c 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -39,6 +39,29 @@ FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api.vsa") +def _vsa_view(context, vsa, details=False): + """Map keys for vsa summary/detailed view.""" + d = {} + + d['id'] = vsa.get('id') + d['name'] = vsa.get('name') + d['displayName'] = vsa.get('display_name') + d['displayDescription'] = vsa.get('display_description') + + d['createTime'] = vsa.get('created_at') + d['status'] = vsa.get('status') + + if 'vsa_instance_type' in vsa: + d['vcType'] = vsa['vsa_instance_type'].get('name', None) + else: + d['vcType'] = None + + d['vcCount'] = vsa.get('vc_count') + d['driveCount'] = 
vsa.get('vol_count') + + return d + + class VsaController(object): """The Virtual Storage Array API controller for the OpenStack API.""" @@ -61,34 +84,12 @@ class VsaController(object): self.vsa_api = vsa.API() super(VsaController, self).__init__() - def _vsa_view(self, context, vsa, details=False): - """Map keys for vsa summary/detailed view.""" - d = {} - - d['id'] = vsa['id'] - d['name'] = vsa['name'] - d['displayName'] = vsa['display_name'] - d['displayDescription'] = vsa['display_description'] - - d['createTime'] = vsa['created_at'] - d['status'] = vsa['status'] - - if vsa['vsa_instance_type']: - d['vcType'] = vsa['vsa_instance_type'].get('name', None) - else: - d['vcType'] = None - - d['vcCount'] = vsa['vc_count'] - d['driveCount'] = vsa['vol_count'] - - return d - def _items(self, req, details): """Return summary or detailed list of VSAs.""" context = req.environ['nova.context'] vsas = self.vsa_api.get_all(context) limited_list = common.limited(vsas, req) - res = [self._vsa_view(context, vsa, details) for vsa in limited_list] + res = [_vsa_view(context, vsa, details) for vsa in limited_list] return {'vsaSet': res} def index(self, req): @@ -108,24 +109,20 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'vsa': self._vsa_view(context, vsa, details=True)} + return {'vsa': _vsa_view(context, vsa, details=True)} def create(self, req, body): """Create a new VSA.""" context = req.environ['nova.context'] - if not body: + if not body or 'vsa' not in body: + LOG.debug(_("No body provided"), context=context) return faults.Fault(exc.HTTPUnprocessableEntity()) vsa = body['vsa'] display_name = vsa.get('displayName') - display_description = vsa.get('displayDescription') - storage = vsa.get('storage') - shared = vsa.get('shared') vc_type = vsa.get('vcType', FLAGS.default_vsa_instance_type) - availability_zone = vsa.get('placement', {}).get('AvailabilityZone') - try: instance_type = instance_types.get_instance_type_by_name(vc_type) except exception.NotFound: @@ -134,15 +131,17 @@ class VsaController(object): LOG.audit(_("Create VSA %(display_name)s of type %(vc_type)s"), locals(), context=context) - result = self.vsa_api.create(context, - display_name=display_name, - display_description=display_description, - storage=storage, - shared=shared, - instance_type=instance_type, - availability_zone=availability_zone) + args = dict(display_name=display_name, + display_description=vsa.get('displayDescription'), + instance_type=instance_type, + storage=vsa.get('storage'), + shared=vsa.get('shared'), + availability_zone=vsa.get('placement', {}).\ + get('AvailabilityZone')) - return {'vsa': self._vsa_view(context, result, details=True)} + result = self.vsa_api.create(context, **args) + + return {'vsa': _vsa_view(context, result, details=True)} def delete(self, req, id): """Delete a VSA.""" @@ -154,7 +153,7 @@ class VsaController(object): self.vsa_api.delete(context, vsa_id=id) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() + # return exc.HTTPAccepted() class VsaVolumeDriveController(volumes.VolumeController): diff --git a/nova/db/api.py b/nova/db/api.py index fde229099..a3a6d47c4 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -1455,14 +1455,14 @@ def drive_type_create(context, values): return IMPL.drive_type_create(context, values) -def drive_type_update(context, name, values): +def drive_type_update(context, drive_type_id, values): """Updates drive type record.""" - return 
IMPL.drive_type_update(context, name, values) + return IMPL.drive_type_update(context, drive_type_id, values) -def drive_type_destroy(context, name): +def drive_type_destroy(context, drive_type_id): """Deletes drive type record.""" - return IMPL.drive_type_destroy(context, name) + return IMPL.drive_type_destroy(context, drive_type_id) def drive_type_get(context, drive_type_id): @@ -1475,7 +1475,7 @@ def drive_type_get_by_name(context, name): return IMPL.drive_type_get_by_name(context, name) -def drive_type_get_all(context, visible=None): +def drive_type_get_all(context, visible): """Returns all (or only visible) drive types.""" return IMPL.drive_type_get_all(context, visible) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index aa5a6e052..c08524265 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3364,31 +3364,30 @@ def drive_type_create(context, values): @require_admin_context -def drive_type_update(context, name, values): +def drive_type_update(context, drive_type_id, values): """ Updates drive type record. """ session = get_session() with session.begin(): - drive_type_ref = drive_type_get_by_name(context, name, session=session) + drive_type_ref = drive_type_get(context, drive_type_id, + session=session) drive_type_ref.update(values) drive_type_ref.save(session=session) return drive_type_ref @require_admin_context -def drive_type_destroy(context, name): +def drive_type_destroy(context, drive_type_id): """ Deletes drive type record. """ session = get_session() drive_type_ref = session.query(models.DriveTypes).\ - filter_by(name=name) + filter_by(id=drive_type_id) records = drive_type_ref.delete() if records == 0: - raise exception.VirtualDiskTypeNotFoundByName(name=name) - else: - return drive_type_ref + raise exception.VirtualDiskTypeNotFound(id=drive_type_id) @require_context @@ -3428,20 +3427,20 @@ def drive_type_get_by_name(context, name, session=None): @require_context -def drive_type_get_all(context, visible=False): +def drive_type_get_all(context, visible): """ Returns all (or only visible) drive types. 
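+
+    For example, drive_type_get_all(ctxt, True) returns only rows with
+    visible=True, ordered by name; passing False returns every
+    non-deleted row.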
""" session = get_session() - if not visible: + if visible: drive_types = session.query(models.DriveTypes).\ filter_by(deleted=can_read_deleted(context)).\ + filter_by(visible=True).\ order_by("name").\ all() else: drive_types = session.query(models.DriveTypes).\ filter_by(deleted=can_read_deleted(context)).\ - filter_by(visible=True).\ order_by("name").\ all() return drive_types diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index c6517d9d5..059afce68 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -41,9 +41,11 @@ flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, flags.DEFINE_boolean('vsa_select_unique_drives', True, 'Allow selection of same host for multiple drives') + def BYTES_TO_GB(bytes): return bytes >> FLAGS.gb_to_bytes_shift + def GB_TO_BYTES(gb): return gb << FLAGS.gb_to_bytes_shift @@ -269,7 +271,8 @@ class VsaScheduler(simple.SimpleScheduler): vol['capabilities'] = qos_cap self._consume_resource(qos_cap, vol['size'], -1) - # LOG.debug(_("Volume %(name)s assigned to host %(host)s"), locals()) + # LOG.debug(_("Volume %(name)s assigned to host %(host)s"), + # locals()) def schedule_create_volumes(self, context, request_spec, availability_zone, *_args, **_kwargs): diff --git a/nova/tests/api/openstack/contrib/test_drive_types.py b/nova/tests/api/openstack/contrib/test_drive_types.py new file mode 100644 index 000000000..2f7d327d3 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_drive_types.py @@ -0,0 +1,192 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import stubout +import webob + +#from nova import compute +from nova.vsa import drive_types +from nova import exception +from nova import context +from nova import test +from nova import log as logging +from nova.tests.api.openstack import fakes + +from nova.api.openstack.contrib.drive_types import _drive_type_view + +LOG = logging.getLogger('nova.tests.api.openstack.drive_types') + +last_param = {} + + +def _get_default_drive_type(): + param = { + 'name': 'Test drive type', + 'type': 'SATA', + 'size_gb': 123, + 'rpm': '7200', + 'capabilities': '', + 'visible': True + } + return param + + +def _create(context, **param): + global last_param + LOG.debug(_("_create: %s"), param) + param['id'] = 123 + last_param = param + return param + + +def _delete(context, id): + global last_param + last_param = dict(id=id) + + LOG.debug(_("_delete: %s"), locals()) + + +def _get(context, id): + global last_param + last_param = dict(id=id) + + LOG.debug(_("_get: %s"), locals()) + if id != '123': + raise exception.NotFound + + dtype = _get_default_drive_type() + dtype['id'] = id + return dtype + + +def _get_all(context, visible=True): + LOG.debug(_("_get_all: %s"), locals()) + dtype = _get_default_drive_type() + dtype['id'] = 123 + return [dtype] + + +class DriveTypesApiTest(test.TestCase): + def setUp(self): + super(DriveTypesApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(drive_types, "create", _create) + self.stubs.Set(drive_types, "delete", _delete) + self.stubs.Set(drive_types, "get", _get) + self.stubs.Set(drive_types, "get_all", _get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(DriveTypesApiTest, self).tearDown() + + def test_drive_types_api_create(self): + global last_param + last_param = {} + + dtype = _get_default_drive_type() + dtype['id'] = 123 + + body = dict(drive_type=_drive_type_view(dtype)) + req = webob.Request.blank('/v1.1/zadr-drive_types') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + # Compare if parameters were correctly passed to stub + for k, v in last_param.iteritems(): + self.assertEqual(last_param[k], dtype[k]) + + resp_dict = json.loads(resp.body) + + # Compare response + self.assertTrue('drive_type' in resp_dict) + resp_dtype = resp_dict['drive_type'] + self.assertEqual(resp_dtype, _drive_type_view(dtype)) + + def test_drive_types_api_delete(self): + global last_param + last_param = {} + + dtype_id = 123 + req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['id']), str(dtype_id)) + + def test_drive_types_show(self): + global last_param + last_param = {} + + dtype_id = 123 + req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['id']), str(dtype_id)) + + resp_dict = json.loads(resp.body) + + # Compare response + self.assertTrue('drive_type' in resp_dict) + resp_dtype = resp_dict['drive_type'] + exp_dtype = 
_get_default_drive_type() + exp_dtype['id'] = dtype_id + exp_dtype_view = _drive_type_view(exp_dtype) + for k, v in exp_dtype_view.iteritems(): + self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) + + def test_drive_types_show_invalid_id(self): + global last_param + last_param = {} + + dtype_id = 234 + req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['id']), str(dtype_id)) + + def test_drive_types_index(self): + + req = webob.Request.blank('/v1.1/zadr-drive_types') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('drive_types' in resp_dict) + resp_dtypes = resp_dict['drive_types'] + self.assertEqual(len(resp_dtypes), 1) + + resp_dtype = resp_dtypes.pop() + exp_dtype = _get_default_drive_type() + exp_dtype['id'] = 123 + exp_dtype_view = _drive_type_view(exp_dtype) + for k, v in exp_dtype_view.iteritems(): + self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py new file mode 100644 index 000000000..bc0b7eaa6 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -0,0 +1,239 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
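+
+"""Tests for the zadr-vsa API extension.
+
+The nova.vsa.api.API entry points are stubbed out below, so these tests
+exercise request routing, parameter passing and the _vsa_view mapping
+only.
+"""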
+ +import json +import stubout +import webob + +#from nova import compute +from nova import vsa +from nova import exception +from nova import context +from nova import test +from nova import log as logging +from nova.tests.api.openstack import fakes + +from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view + +LOG = logging.getLogger('nova.tests.api.openstack.vsa') + +last_param = {} + + +def _get_default_vsa_param(): + return { + 'display_name': 'Test_VSA_name', + 'display_description': 'Test_VSA_description', + 'vc_count': 1, + 'instance_type': 'm1.small', + 'image_name': None, + 'availability_zone': None, + 'storage': [], + 'shared': False + } + + +def stub_vsa_create(self, context, **param): + global last_param + LOG.debug(_("_create: param=%s"), param) + param['id'] = 123 + param['name'] = 'Test name' + last_param = param + return param + + +def stub_vsa_delete(self, context, vsa_id): + global last_param + last_param = dict(vsa_id=vsa_id) + + LOG.debug(_("_delete: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + +def stub_vsa_get(self, context, vsa_id): + global last_param + last_param = dict(vsa_id=vsa_id) + + LOG.debug(_("_get: %s"), locals()) + if vsa_id != '123': + raise exception.NotFound + + param = _get_default_vsa_param() + param['id'] = vsa_id + return param + + +def stub_vsa_get_all(self, context): + LOG.debug(_("_get_all: %s"), locals()) + param = _get_default_vsa_param() + param['id'] = 123 + return [param] + + +class VSAApiTest(test.TestCase): + def setUp(self): + super(VSAApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(vsa.api.API, "create", stub_vsa_create) + self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) + self.stubs.Set(vsa.api.API, "get", stub_vsa_get) + self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAApiTest, self).tearDown() + + def test_vsa_api_create(self): + global last_param + last_param = {} + + vsa = {"displayName": "VSA Test Name", + "displayDescription": "VSA Test Desc"} + body = dict(vsa=vsa) + req = webob.Request.blank('/v1.1/zadr-vsa') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + # Compare if parameters were correctly passed to stub + self.assertEqual(last_param['display_name'], "VSA Test Name") + self.assertEqual(last_param['display_description'], "VSA Test Desc") + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['displayName'], vsa['displayName']) + self.assertEqual(resp_dict['vsa']['displayDescription'], + vsa['displayDescription']) + + def test_vsa_api_create_no_body(self): + req = webob.Request.blank('/v1.1/zadr-vsa') + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + + def test_vsa_api_delete(self): + global last_param + last_param = {} + + vsa_id = 123 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + 
self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_api_delete_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'DELETE' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_api_show(self): + global last_param + last_param = {} + + vsa_id = 123 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + resp_dict = json.loads(resp.body) + self.assertTrue('vsa' in resp_dict) + self.assertEqual(resp_dict['vsa']['id'], str(vsa_id)) + + def test_vsa_api_show_invalid_id(self): + global last_param + last_param = {} + + vsa_id = 234 + req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) + + def test_vsa_api_index(self): + req = webob.Request.blank('/v1.1/zadr-vsa') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + def test_vsa_api_detail(self): + req = webob.Request.blank('/v1.1/zadr-vsa/detail') + req.method = 'GET' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + + self.assertTrue('vsaSet' in resp_dict) + resp_vsas = resp_dict['vsaSet'] + self.assertEqual(len(resp_vsas), 1) + + resp_vsa = resp_vsas.pop() + self.assertEqual(resp_vsa['id'], 123) + + +class VSAVolumeDriveApiTest(test.TestCase): + def setUp(self): + super(VSAVolumeDriveApiTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(vsa.api.API, "create", stub_vsa_create) + self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) + self.stubs.Set(vsa.api.API, "get", stub_vsa_get) + self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAVolumeDriveApiTest, self).tearDown() diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py new file mode 100644 index 000000000..8534bcde5 --- /dev/null +++ b/nova/tests/test_drive_types.py @@ -0,0 +1,146 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for drive types code
+"""
+
+from nova import context
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova.vsa import drive_types
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa')
+
+
+class DriveTypesTestCase(test.TestCase):
+    """Test cases for drive types code"""
+    def setUp(self):
+        super(DriveTypesTestCase, self).setUp()
+        self.cntx = context.RequestContext(None, None)
+        self.cntx_admin = context.get_admin_context()
+        self._dtype = self._create_drive_type()
+
+    def tearDown(self):
+        self._dtype = None
+        super(DriveTypesTestCase, self).tearDown()
+
+    def _create_drive_type(self):
+        """Create a drive type dictionary."""
+        dtype = {}
+        dtype['type'] = 'SATA'
+        dtype['size_gb'] = 150
+        dtype['rpm'] = 5000
+        dtype['capabilities'] = None
+        dtype['visible'] = True
+
+        LOG.debug(_("Drive Type created %s"), dtype)
+        return dtype
+
+    def test_drive_type_create_delete(self):
+        dtype = self._dtype
+        prev_all_dtypes = drive_types.get_all(self.cntx_admin, False)
+
+        new = drive_types.create(self.cntx_admin, **dtype)
+        for k, v in dtype.iteritems():
+            self.assertEqual(v, new[k], "one of the fields doesn't match")
+
+        new_all_dtypes = drive_types.get_all(self.cntx_admin, False)
+        self.assertNotEqual(len(prev_all_dtypes),
+                            len(new_all_dtypes),
+                            'drive type was not created')
+
+        drive_types.delete(self.cntx_admin, new['id'])
+        new_all_dtypes = drive_types.get_all(self.cntx_admin, False)
+        self.assertEqual(prev_all_dtypes,
+                         new_all_dtypes,
+                         'drive type was not deleted')
+
+    def test_drive_type_check_name_generation(self):
+        dtype = self._dtype
+        new = drive_types.create(self.cntx_admin, **dtype)
+        expected_name = FLAGS.drive_type_template_short % \
+                            (dtype['type'], dtype['size_gb'], dtype['rpm'])
+        self.assertEqual(new['name'], expected_name,
+                         'name was not generated correctly')
+
+        dtype['capabilities'] = 'SEC'
+        new2 = drive_types.create(self.cntx_admin, **dtype)
+        expected_name = FLAGS.drive_type_template_long % \
+                            (dtype['type'], dtype['size_gb'], dtype['rpm'],
+                             dtype['capabilities'])
+        self.assertEqual(new2['name'], expected_name,
+                         'name was not generated correctly')
+
+        drive_types.delete(self.cntx_admin, new['id'])
+        drive_types.delete(self.cntx_admin, new2['id'])
+
+    def test_drive_type_create_delete_invisible(self):
+        dtype = self._dtype
+        dtype['visible'] = False
+        prev_all_dtypes = drive_types.get_all(self.cntx_admin, True)
+        new = drive_types.create(self.cntx_admin, **dtype)
+
+        new_all_dtypes = drive_types.get_all(self.cntx_admin, True)
+        self.assertEqual(prev_all_dtypes, new_all_dtypes)
+
+        new_all_dtypes = drive_types.get_all(self.cntx_admin, False)
+        self.assertNotEqual(prev_all_dtypes, new_all_dtypes)
+
+        drive_types.delete(self.cntx_admin, new['id'])
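
[Editor's note: the name-generation checks in
test_drive_type_check_name_generation above rely on the
drive_type_template_short/long flags, which are defined in nova/flags.py and
not visible in this hunk. Assuming plain templates such as '%s_%s_%s' (short)
and '%s_%s_%s_%s' (long), the expected names reduce to a sketch like this --
illustrative only, not part of the patch:

    # Hypothetical template values; the real ones live in nova/flags.py.
    dtype = {'type': 'SATA', 'size_gb': 150, 'rpm': 5000}
    short_name = '%s_%s_%s' % (dtype['type'], dtype['size_gb'], dtype['rpm'])
    # -> 'SATA_150_5000'
    long_name = '%s_%s_%s_%s' % (dtype['type'], dtype['size_gb'],
                                 dtype['rpm'], 'SEC')
    # -> 'SATA_150_5000_SEC'
]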
+
+    def test_drive_type_rename_update(self):
+        dtype = self._dtype
+        dtype['capabilities'] = None
+
+        new = drive_types.create(self.cntx_admin, **dtype)
+        for k, v in dtype.iteritems():
+            self.assertEqual(v, new[k], "one of the fields doesn't match")
+
+        new_name = 'NEW_DRIVE_NAME'
+        new = drive_types.rename(self.cntx_admin, new['name'], new_name)
+        self.assertEqual(new['name'], new_name)
+
+        new = drive_types.rename(self.cntx_admin, new_name)
+        expected_name = FLAGS.drive_type_template_short % \
+                            (dtype['type'], dtype['size_gb'], dtype['rpm'])
+        self.assertEqual(new['name'], expected_name)
+
+        changes = {'rpm': 7200}
+        new = drive_types.update(self.cntx_admin, new['id'], **changes)
+        for k, v in changes.iteritems():
+            self.assertEqual(v, new[k], "one of the fields doesn't match")
+
+        drive_types.delete(self.cntx_admin, new['id'])
+
+    def test_drive_type_get(self):
+        dtype = self._dtype
+        new = drive_types.create(self.cntx_admin, **dtype)
+
+        new2 = drive_types.get(self.cntx_admin, new['id'])
+        for k, v in new2.iteritems():
+            self.assertEqual(str(new[k]), str(new2[k]),
+                             "one of the fields doesn't match")
+
+        new2 = drive_types.get_by_name(self.cntx_admin, new['name'])
+        for k, v in new.iteritems():
+            self.assertEqual(str(new[k]), str(new2[k]),
+                             "one of the fields doesn't match")
+
+        drive_types.delete(self.cntx_admin, new['id'])
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index ec09325d8..b93fc1d92 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -22,6 +22,7 @@ Drivers for volumes.
 
 import time
 import os
+from xml.etree import ElementTree
 
 from nova import exception
 from nova import flags
@@ -208,6 +209,11 @@ class VolumeDriver(object):
         """Make sure volume is exported."""
         raise NotImplementedError()
 
+    def get_volume_stats(self, refresh=False):
+        """Return the current state of the volume service. If 'refresh' is
+        True, run the update first."""
+        return None
+
 
 class AOEDriver(VolumeDriver):
     """Implements AOE specific volume commands."""
@@ -809,3 +815,214 @@ class LoggingVolumeDriver(VolumeDriver):
             if match:
                 matches.append(entry)
         return matches
+
+
+class ZadaraBEDriver(ISCSIDriver):
+    """Performs actions to configure Zadara BE module."""
+
+    def _not_vsa_be_volume(self, volume):
+        """Returns True if volume is not a VSA BE volume."""
+        if volume['to_vsa_id'] is None:
+            LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name'])
+            return True
+        else:
+            return False
+
+    def check_for_setup_error(self):
+        """No setup necessary for Zadara BE."""
+        pass
+
+    """ Volume Driver methods """
+    def create_volume(self, volume):
+        """Creates BE volume."""
+        if self._not_vsa_be_volume(volume):
+            return super(ZadaraBEDriver, self).create_volume(volume)
+
+        if int(volume['size']) == 0:
+            sizestr = '0'  # indicates full-partition
+        else:
+            sizestr = '%s' % (int(volume['size']) << 30)  # size in bytes
+
+        # Set the qos-str to default type sas
+        qosstr = 'SAS_1000'
+        drive_type = volume.get('drive_type')
+        if drive_type is not None:
+            qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb'])
+
+        try:
+            self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+                            'create_qospart',
+                            '--qos', qosstr,
+                            '--pname', volume['name'],
+                            '--psize', sizestr,
+                            check_exit_code=0)
+        except exception.ProcessExecutionError:
+            LOG.debug(_("VSA BE create_volume for %s failed"), volume['name'])
+            raise
+
+        LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name'])
+
+    def delete_volume(self, volume):
+        """Deletes BE volume."""
+        if self._not_vsa_be_volume(volume):
+            return super(ZadaraBEDriver, self).delete_volume(volume)
+
+        try:
+            self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+                            'delete_partition',
+                            '--pname', volume['name'],
+                            check_exit_code=0)
+        except exception.ProcessExecutionError:
+            LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name'])
+            return
+
+        LOG.debug(_("VSA BE delete_volume for %s succeeded"), volume['name'])
+
+    def local_path(self, volume):
+        if self._not_vsa_be_volume(volume):
+            return super(ZadaraBEDriver, self).local_path(volume)
+
+        raise exception.Error(_("local_path not supported"))
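
[Editor's note: a concrete illustration of the sizing/QoS logic in
create_volume() above -- a sketch with hypothetical values, not part of the
patch. For a 500 GB BE volume whose drive_type row is
{'type': 'SAS', 'size_gb': 750}, the driver would end up invoking:

    # 500 << 30 == 536870912000 (GB shifted to bytes);
    # qosstr is "<type>_<size_gb>" from the drive type, else 'SAS_1000'.
    # The volume name below is a made-up placeholder.
    self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
                    'create_qospart',
                    '--qos', 'SAS_750',
                    '--pname', 'volume-00000001',
                    '--psize', '536870912000',
                    check_exit_code=0)

A requested size of 0 is passed through as --psize 0, which per the inline
comment requests the full partition.]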
+    def ensure_export(self, context, volume):
+        """Ensures BE export for a volume."""
+        if self._not_vsa_be_volume(volume):
+            return super(ZadaraBEDriver, self).ensure_export(context, volume)
+
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping ensure_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
+        try:
+            ret = self._common_be_export(context, volume, iscsi_target)
+        except exception.ProcessExecutionError:
+            return
+        return ret
+
+    def create_export(self, context, volume):
+        """Creates BE export for a volume."""
+        if self._not_vsa_be_volume(volume):
+            return super(ZadaraBEDriver, self).create_export(context, volume)
+
+        self._ensure_iscsi_targets(context, volume['host'])
+        iscsi_target = self.db.volume_allocate_iscsi_target(context,
+                                                            volume['id'],
+                                                            volume['host'])
+        return self._common_be_export(context, volume, iscsi_target)
+
+    def remove_export(self, context, volume):
+        """Removes BE export for a volume."""
+        if self._not_vsa_be_volume(volume):
+            return super(ZadaraBEDriver, self).remove_export(context, volume)
+
+        try:
+            iscsi_target = self.db.volume_get_iscsi_target_num(context,
+                                                               volume['id'])
+        except exception.NotFound:
+            LOG.info(_("Skipping remove_export. No iscsi_target " +
+                       "provisioned for volume: %d"), volume['id'])
+            return
+
+        try:
+            self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg',
+                            'remove_export',
+                            '--pname', volume['name'],
+                            '--tid', iscsi_target,
+                            check_exit_code=0)
+        except exception.ProcessExecutionError:
+            LOG.debug(_("VSA BE remove_export for %s failed"), volume['name'])
+            return
+
+    def create_snapshot(self, snapshot):
+        """Nothing required for a BE snapshot."""
+        if self._not_vsa_be_volume(snapshot):
+            return super(ZadaraBEDriver, self).create_snapshot(snapshot)
+
+        pass
+
+    def delete_snapshot(self, snapshot):
+        """Nothing required to delete a BE snapshot."""
+        if self._not_vsa_be_volume(snapshot):
+            return super(ZadaraBEDriver, self).delete_snapshot(snapshot)
+
+        pass
+
+    """ Internal BE Volume methods """
+    def _common_be_export(self, context, volume, iscsi_target):
+        """
+        Common logic that asks zadara_sncfg to setup iSCSI target/lun for
+        this volume
+        """
+        (out, err) = self._sync_exec('sudo',
+                                     '/var/lib/zadara/bin/zadara_sncfg',
+                                     'create_export',
+                                     '--pname', volume['name'],
+                                     '--tid', iscsi_target,
+                                     check_exit_code=0)
+
+        result_xml = ElementTree.fromstring(out)
+        response_node = result_xml.find("Sn")
+        if response_node is None:
+            msg = "Malformed response from zadara_sncfg"
+            raise exception.Error(msg)
+
+        sn_ip = response_node.findtext("SnIp")
+        sn_iqn = response_node.findtext("IqnName")
+        iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target)
+
+        model_update = {}
+        model_update['provider_location'] = ("%s %s" %
+                                             (iscsi_portal,
+                                              sn_iqn))
+        return model_update
+
+    def _get_qosgroup_summary(self):
+        """Gets the list of qosgroups from Zadara BE."""
+        try:
+            (out, err) = self._sync_exec('sudo',
+                                         '/var/lib/zadara/bin/zadara_sncfg',
+                                         'get_qosgroups_xml',
+                                         check_exit_code=0)
+        except exception.ProcessExecutionError:
+            LOG.debug(_("Failed to retrieve QoS info"))
+            return {}
+
+        qos_groups = {}
+        result_xml = ElementTree.fromstring(out)
+        for element in result_xml.findall('QosGroup'):
+            qos_group = {}
+            # get the name of the group.
+ # If we cannot find it, forget this element + group_name = element.findtext("Name") + if not group_name: + continue + + # loop through all child nodes & fill up attributes of this group + for child in element.getchildren(): + # two types of elements - property of qos-group & sub property + # classify them accordingly + if child.text: + qos_group[child.tag] = int(child.text) \ + if child.text.isdigit() else child.text + else: + subelement = {} + for subchild in child.getchildren(): + subelement[subchild.tag] = int(subchild.text) \ + if subchild.text.isdigit() else subchild.text + qos_group[child.tag] = subelement + + # Now add this group to the master qos_groups + qos_groups[group_name] = qos_group + + return qos_groups + + def get_volume_stats(self, refresh=False): + """Return the current state of the volume service. If 'refresh' is + True, run the update first.""" + + drive_info = self._get_qosgroup_summary() + return {'drive_qos_info': drive_info} diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 3e2892fee..d2c36e96f 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,7 +42,7 @@ intact. """ -import time +# import time from nova import context from nova import exception @@ -60,41 +60,27 @@ flags.DEFINE_string('storage_availability_zone', 'availability zone of this service') flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver', 'Driver to use for volume creation') -flags.DEFINE_string('vsa_volume_driver', 'nova.volume.san.ZadaraVsaDriver', - 'Driver to use for FE/BE volume creation with VSA') flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') -flags.DEFINE_integer('volume_state_interval', 60, - 'Interval in seconds for querying volumes status') +# flags.DEFINE_integer('volume_state_interval', 60, +# 'Interval in seconds for querying volumes status') class VolumeManager(manager.SchedulerDependentManager): """Manages attachable block storage devices.""" - def __init__(self, volume_driver=None, vsa_volume_driver=None, - *args, **kwargs): + def __init__(self, volume_driver=None, *args, **kwargs): """Load the driver from the one specified in args, or from flags.""" if not volume_driver: volume_driver = FLAGS.volume_driver self.driver = utils.import_object(volume_driver) - if not vsa_volume_driver: - vsa_volume_driver = FLAGS.vsa_volume_driver - self.vsadriver = utils.import_object(vsa_volume_driver) super(VolumeManager, self).__init__(service_name='volume', *args, **kwargs) # NOTE(vish): Implementation specific db handling is done # by the driver. self.driver.db = self.db - self.vsadriver.db = self.db self._last_volume_stats = [] #self._last_host_check = 0 - def _get_driver(self, volume_ref): - if volume_ref['to_vsa_id'] is None and \ - volume_ref['from_vsa_id'] is None: - return self.driver - else: - return self.vsadriver - def init_host(self): """Do any initialization that needs to be run if this is a standalone service.""" @@ -104,8 +90,7 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: if volume['status'] in ['available', 'in-use']: - driver = self._get_driver(volume) - driver.ensure_export(ctxt, volume) + self.driver.ensure_export(ctxt, volume) else: LOG.info(_("volume %s: skipping export"), volume['name']) @@ -126,28 +111,26 @@ class VolumeManager(manager.SchedulerDependentManager): # before passing it to the driver. 
volume_ref['host'] = self.host - driver = self._get_driver(volume_ref) try: vol_name = volume_ref['name'] vol_size = volume_ref['size'] LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals()) if snapshot_id == None: - model_update = driver.create_volume(volume_ref) + model_update = self.driver.create_volume(volume_ref) else: snapshot_ref = self.db.snapshot_get(context, snapshot_id) - model_update = driver.create_volume_from_snapshot( + model_update = self.driver.create_volume_from_snapshot( volume_ref, snapshot_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) LOG.debug(_("volume %s: creating export"), volume_ref['name']) - model_update = driver.create_export(context, volume_ref) + model_update = self.driver.create_export(context, volume_ref) if model_update: self.db.volume_update(context, volume_ref['id'], model_update) - # except Exception: - except: + except Exception: self.db.volume_update(context, volume_ref['id'], {'status': 'error'}) self._notify_vsa(context, volume_ref, 'error') @@ -181,15 +164,14 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) - driver = self._get_driver(volume_ref) try: LOG.debug(_("volume %s: removing export"), volume_ref['name']) - driver.remove_export(context, volume_ref) + self.driver.remove_export(context, volume_ref) LOG.debug(_("volume %s: deleting"), volume_ref['name']) - driver.delete_volume(volume_ref) + self.driver.delete_volume(volume_ref) except exception.VolumeIsBusy, e: LOG.debug(_("volume %s: volume is busy"), volume_ref['name']) - driver.ensure_export(context, volume_ref) + self.driver.ensure_export(context, volume_ref) self.db.volume_update(context, volume_ref['id'], {'status': 'available'}) return True @@ -212,7 +194,6 @@ class VolumeManager(manager.SchedulerDependentManager): try: snap_name = snapshot_ref['name'] LOG.debug(_("snapshot %(snap_name)s: creating") % locals()) - # snapshot-related operations are irrelevant for vsadriver model_update = self.driver.create_snapshot(snapshot_ref) if model_update: self.db.snapshot_update(context, snapshot_ref['id'], @@ -236,7 +217,6 @@ class VolumeManager(manager.SchedulerDependentManager): try: LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name']) - # snapshot-related operations are irrelevant for vsadriver self.driver.delete_snapshot(snapshot_ref) except Exception: self.db.snapshot_update(context, @@ -254,29 +234,26 @@ class VolumeManager(manager.SchedulerDependentManager): Returns path to device.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: - path = driver.local_path(volume_ref) + path = self.driver.local_path(volume_ref) else: - path = driver.discover_volume(context, volume_ref) + path = self.driver.discover_volume(context, volume_ref) return path def remove_compute_volume(self, context, volume_id): """Remove remote volume on compute host.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - driver = self._get_driver(volume_ref) if volume_ref['host'] == self.host and FLAGS.use_local_volumes: return True else: - driver.undiscover_volume(volume_ref) + self.driver.undiscover_volume(volume_ref) def check_for_export(self, context, instance_id): """Make sure whether volume is exported.""" instance_ref = self.db.instance_get(context, instance_id) for volume 
in instance_ref['volumes']: - driver = self._get_driver(volume) - driver.check_for_export(context, volume['id']) + self.driver.check_for_export(context, volume['id']) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" @@ -310,18 +287,20 @@ class VolumeManager(manager.SchedulerDependentManager): #if curr_time - self._last_host_check > FLAGS.volume_state_interval: # self._last_host_check = curr_time - LOG.info(_("Updating volume status")) - - volume_stats = self.vsadriver.get_volume_stats(refresh=True) - if self._volume_stats_changed(self._last_volume_stats, volume_stats): - LOG.info(_("New capabilities found: %s"), volume_stats) - self._last_volume_stats = volume_stats - - # This will grab info about the host and queue it - # to be sent to the Schedulers. - self.update_service_capabilities(self._last_volume_stats) - else: - self.update_service_capabilities(None) + volume_stats = self.driver.get_volume_stats(refresh=True) + if volume_stats: + LOG.info(_("Checking volume capabilities")) + + if self._volume_stats_changed(self._last_volume_stats, volume_stats): + + LOG.info(_("New capabilities found: %s"), volume_stats) + self._last_volume_stats = volume_stats + + # This will grab info about the host and queue it + # to be sent to the Schedulers. + self.update_service_capabilities(self._last_volume_stats) + else: + self.update_service_capabilities(None) def notification(self, context, event): LOG.info(_("Notification {%s} received"), event) diff --git a/nova/volume/san.py b/nova/volume/san.py index 6a962c6f2..be7869ac7 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -588,311 +588,3 @@ class HpSanISCSIDriver(SanISCSIDriver): cliq_args['volumeName'] = volume['name'] self._cliq_run_xml("unassignVolume", cliq_args) - - -class ZadaraVsaDriver(SanISCSIDriver): - """Executes commands relating to Virtual Storage Array volumes. - - There are two types of volumes. Front-end(FE) volumes and Back-end(BE) - volumes. - - FE volumes are nova-volumes that are exported by VSA instance & can be - consumed by user instances. We use SSH to connect into the VSA instance - to execute those steps. - - BE volumes are nova-volumes that are attached as back-end storage for the - VSA instance. - - VSA instance essentially consumes the BE volumes and allows creation of FE - volumes over it. - """ - - """ Volume Driver methods """ - def create_volume(self, volume): - """Creates FE/BE volume.""" - if volume['to_vsa_id']: - self._create_be_volume(volume) - else: - self._create_fe_volume(volume) - - def delete_volume(self, volume): - """Deletes FE/BE volume.""" - if volume['to_vsa_id']: - self._delete_be_volume(volume) - else: - self._delete_fe_volume(volume) - - def local_path(self, volume): - # TODO: Is this needed here? - raise exception.Error(_("local_path not supported")) - - def ensure_export(self, context, volume): - """On bootup synchronously ensures a volume export is available.""" - if volume['to_vsa_id']: - return self._ensure_be_export(context, volume) - - # Not required for FE volumes. 
VSA VM will ensure volume exposure - pass - - def create_export(self, context, volume): - """For first time creates volume export.""" - if volume['to_vsa_id']: - return self._create_be_export(context, volume) - else: - return self._create_fe_export(context, volume) - - def remove_export(self, context, volume): - if volume['to_vsa_id']: - return self._remove_be_export(context, volume) - else: - return self._remove_fe_export(context, volume) - - def check_for_setup_error(self): - """Returns an error if prerequisites aren't met""" - # skip the flags.san_ip check & do the regular check - - if not (FLAGS.san_password or FLAGS.san_privatekey): - raise exception.Error(_("Specify san_password or san_privatekey")) - - """ Internal BE Volume methods """ - def _create_be_volume(self, volume): - """Creates BE volume.""" - if int(volume['size']) == 0: - sizestr = '0' # indicates full-partition - else: - sizestr = '%s' % (int(volume['size']) << 30) # size in bytes - - # Set the qos-str to default type sas - # TODO - later for this piece we will get the direct qos-group name - # in create_volume and hence this lookup will not be needed - qosstr = 'SAS_1000' - drive_type = volume.get('drive_type') - if drive_type is not None: - # for now just use the qos-type string from the disktypes. - qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) - - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', - 'create_qospart', - '--qos', qosstr, - '--pname', volume['name'], - '--psize', sizestr, - check_exit_code=0) - LOG.debug(_("VSA BE create_volume for %s succeeded"), volume['name']) - - def _delete_be_volume(self, volume): - try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', - 'delete_partition', - '--pname', volume['name'], - check_exit_code=0) - except exception.ProcessExecutionError: - LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name']) - return - - LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name']) - - def _create_be_export(self, context, volume): - """create BE export for a volume""" - self._ensure_iscsi_targets(context, volume['host']) - iscsi_target = self.db.volume_allocate_iscsi_target(context, - volume['id'], - volume['host']) - return self._common_be_export(context, volume, iscsi_target) - - def _ensure_be_export(self, context, volume): - """ensure BE export for a volume""" - try: - iscsi_target = self.db.volume_get_iscsi_target_num(context, - volume['id']) - except exception.NotFound: - LOG.info(_("Skipping ensure_export. 
No iscsi_target " + - "provisioned for volume: %d"), volume['id']) - return - - return self._common_be_export(context, volume, iscsi_target) - - def _common_be_export(self, context, volume, iscsi_target): - """ - Common logic that asks zadara_sncfg to setup iSCSI target/lun for - this volume - """ - (out, err) = self._sync_exec('sudo', - '/var/lib/zadara/bin/zadara_sncfg', - 'create_export', - '--pname', volume['name'], - '--tid', iscsi_target, - check_exit_code=0) - - result_xml = ElementTree.fromstring(out) - response_node = result_xml.find("Sn") - if response_node is None: - msg = "Malformed response from zadara_sncfg" - raise exception.Error(msg) - - sn_ip = response_node.findtext("SnIp") - sn_iqn = response_node.findtext("IqnName") - iscsi_portal = sn_ip + ":3260," + ("%s" % iscsi_target) - - model_update = {} - model_update['provider_location'] = ("%s %s" % - (iscsi_portal, - sn_iqn)) - return model_update - - def _remove_be_export(self, context, volume): - """Removes BE export for a volume.""" - try: - iscsi_target = self.db.volume_get_iscsi_target_num(context, - volume['id']) - except exception.NotFound: - LOG.info(_("Skipping remove_export. No iscsi_target " + - "provisioned for volume: %d"), volume['id']) - return - - try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', - 'remove_export', - '--pname', volume['name'], - '--tid', iscsi_target, - check_exit_code=0) - except exception.ProcessExecutionError: - LOG.debug(_("VSA BE remove_export for %s failed"), volume['name']) - return - - def _get_qosgroup_summary(self): - """gets the list of qosgroups from Zadara SN""" - (out, err) = self._sync_exec('sudo', - '/var/lib/zadara/bin/zadara_sncfg', - 'get_qosgroups_xml', - check_exit_code=0) - qos_groups = {} - #qos_groups = [] - result_xml = ElementTree.fromstring(out) - for element in result_xml.findall('QosGroup'): - qos_group = {} - # get the name of the group. 
- # If we cannot find it, forget this element - group_name = element.findtext("Name") - if not group_name: - continue - - # loop through all child nodes & fill up attributes of this group - for child in element.getchildren(): - # two types of elements - property of qos-group & sub property - # classify them accordingly - if child.text: - qos_group[child.tag] = int(child.text) \ - if child.text.isdigit() else child.text - else: - subelement = {} - for subchild in child.getchildren(): - subelement[subchild.tag] = int(subchild.text) \ - if subchild.text.isdigit() else subchild.text - qos_group[child.tag] = subelement - - # Now add this group to the master qos_groups - qos_groups[group_name] = qos_group - #qos_groups.append(qos_group) - - return qos_groups - - """ Internal FE Volume methods """ - def _vsa_run(self, volume, verb, vsa_args): - """ - Runs a command over SSH to VSA instance and checks for return status - """ - vsa_arg_strings = [] - - if vsa_args: - for k, v in vsa_args.items(): - vsa_arg_strings.append(" --%s %s" % (k, v)) - - # Form the zadara_cfg script that will do the configuration at VSA VM - cmd = "/var/lib/zadara/bin/zadara_cfg.py " + verb + \ - ''.join(vsa_arg_strings) - - # get the list of IP's corresponding to VSA VM's - vsa_ips = self.db.vsa_get_vc_ips_list(context.get_admin_context(), - volume['from_vsa_id']) - if not vsa_ips: - raise exception.Error(_("Cannot Lookup VSA VM's IP")) - return - - # pick the first element in the return's fixed_ip for SSH - vsa_ip = vsa_ips[0]['fixed'] - - (out, _err) = self._run_ssh(cmd, san_ip=vsa_ip) - - # check the xml StatusCode to check fro real status - result_xml = ElementTree.fromstring(out) - - status = result_xml.findtext("StatusCode") - if status != '0': - statusmsg = result_xml.findtext("StatusMessage") - msg = (_('vsa_run failed to ' + verb + ' for ' + volume['name'] + - '. 
Result=' + str(statusmsg))) - raise exception.Error(msg) - - return out, _err - - def _create_fe_volume(self, volume): - """Creates FE volume.""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - if int(volume['size']) == 0: - sizestr = '100M' - else: - sizestr = '%sG' % volume['size'] - vsa_args['volsize'] = sizestr - (out, _err) = self._vsa_run(volume, "create_volume", vsa_args) - - LOG.debug(_("VSA FE create_volume for %s suceeded"), volume['name']) - - def _delete_fe_volume(self, volume): - """Deletes FE volume.""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - (out, _err) = self._vsa_run(volume, "delete_volume", vsa_args) - LOG.debug(_("VSA FE delete_volume for %s suceeded"), volume['name']) - return - - def _create_fe_export(self, context, volume): - """Create FE volume exposure at VSA VM""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - (out, _err) = self._vsa_run(volume, "create_export", vsa_args) - - result_xml = ElementTree.fromstring(out) - response_node = result_xml.find("Vsa") - if response_node is None: - msg = "Malformed response to VSA command " - raise exception.Error(msg) - - LOG.debug(_("VSA create_export for %s suceeded"), volume['name']) - - vsa_ip = response_node.findtext("VsaIp") - vsa_iqn = response_node.findtext("IqnName") - vsa_interface = response_node.findtext("VsaInterface") - iscsi_portal = vsa_ip + ":3260," + vsa_interface - - model_update = {} - model_update['provider_location'] = ("%s %s" % - (iscsi_portal, - vsa_iqn)) - - return model_update - - def remove_fe_export(self, context, volume): - """Remove FE volume exposure at VSA VM""" - vsa_args = {} - vsa_args['volname'] = volume['name'] - (out, _err) = self._vsa_run(volume, "remove_export", vsa_args) - LOG.debug(_("VSA FE remove_export for %s suceeded"), volume['name']) - return - - def get_volume_stats(self, refresh=False): - """Return the current state of the volume service. 
If 'refresh' is - True, run the update first.""" - - drive_info = self._get_qosgroup_summary() - return {'drive_qos_info': drive_info} diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 7ce643aab..b366b6587 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -79,7 +79,7 @@ class API(base.Base): # find DB record for this disk try: - drive_ref = drive_types.drive_type_get_by_name(context, name) + drive_ref = drive_types.get_by_name(context, name) except exception.NotFound: raise exception.ApiError(_("Invalid drive type name %s"), name) diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py index 781206cdf..5bec96047 100644 --- a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -43,8 +43,8 @@ def _generate_default_drive_name(type, size_gb, rpm, capabilities): (type, str(size_gb), rpm, capabilities) -def drive_type_create(context, type, size_gb, rpm, - capabilities='', visible=True, name=None): +def create(context, type, size_gb, rpm, capabilities='', + visible=True, name=None): if name is None: name = _generate_default_drive_name(type, size_gb, rpm, capabilities) @@ -62,12 +62,12 @@ def drive_type_create(context, type, size_gb, rpm, return db.drive_type_create(context, values) -def drive_type_update(context, name, **kwargs): - LOG.debug(_("Updating drive type %(name)s: "), locals()) - return db.drive_type_update(context, name, kwargs) +def update(context, id, **kwargs): + LOG.debug(_("Updating drive type with id %(id)s"), locals()) + return db.drive_type_update(context, id, kwargs) -def drive_type_rename(context, name, new_name=None): +def rename(context, name, new_name=None): if new_name is None or \ new_name == '': @@ -78,21 +78,22 @@ def drive_type_rename(context, name, new_name=None): LOG.debug(_("Renaming drive type %(name)s to %(new_name)s"), locals()) values = dict(name=new_name) - return db.drive_type_update(context, name, values) + dtype = db.drive_type_get_by_name(context, name) + return db.drive_type_update(context, dtype['id'], values) -def drive_type_delete(context, name): - LOG.debug(_("Deleting drive type %(name)s"), locals()) - db.drive_type_destroy(context, name) +def delete(context, id): + LOG.debug(_("Deleting drive type %d"), id) + db.drive_type_destroy(context, id) -def drive_type_get(context, id): +def get(context, id): return db.drive_type_get(context, id) -def drive_type_get_by_name(context, name): +def get_by_name(context, name): return db.drive_type_get_by_name(context, name) -def drive_type_get_all(context, visible=None): +def get_all(context, visible=True): return db.drive_type_get_all(context, visible) -- cgit From 0750370553c3ce40fdd5e88d9616ddb0fbeedbc1 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 22 Jul 2011 15:22:05 -0700 Subject: pep8-compliant. Prior to merge with 1305 --- nova/volume/manager.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index d2c36e96f..348dab782 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -291,15 +291,16 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_stats: LOG.info(_("Checking volume capabilities")) - if self._volume_stats_changed(self._last_volume_stats, volume_stats): - + if self._volume_stats_changed(self._last_volume_stats, + volume_stats): LOG.info(_("New capabilities found: %s"), volume_stats) self._last_volume_stats = volume_stats - + # This will grab info about the host and queue it # to be sent to the Schedulers. 
self.update_service_capabilities(self._last_volume_stats) else: + # avoid repeating fanouts self.update_service_capabilities(None) def notification(self, context, event): -- cgit From 61781dae931ced36db0f2735da474d0bd38a53cf Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 22 Jul 2011 20:25:32 -0700 Subject: more unittest changes --- .../openstack/contrib/virtual_storage_arrays.py | 4 + nova/tests/api/openstack/contrib/test_vsa.py | 238 +++++++++++++++++++-- nova/virt/libvirt/netutils.py | 1 - 3 files changed, 224 insertions(+), 19 deletions(-) diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 3c1362f0c..6139b494e 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -353,6 +353,10 @@ class VsaDriveController(VsaVolumeDriveController): """Update a drive. Should be done through VSA APIs""" return faults.Fault(exc.HTTPBadRequest()) + def delete(self, req, vsa_id, id): + """Delete a volume. Should be done through VSA APIs""" + return faults.Fault(exc.HTTPBadRequest()) + class VsaVPoolController(object): """The vPool VSA API controller for the OpenStack API.""" diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py index bc0b7eaa6..c3150fa9c 100644 --- a/nova/tests/api/openstack/contrib/test_vsa.py +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -15,18 +15,26 @@ import json import stubout +import unittest import webob -#from nova import compute -from nova import vsa + from nova import exception +from nova import flags +from nova import vsa +from nova import db +from nova import volume from nova import context from nova import test from nova import log as logging +from nova.api import openstack from nova.tests.api.openstack import fakes +import nova.wsgi from nova.api.openstack.contrib.virtual_storage_arrays import _vsa_view +FLAGS = flags.FLAGS + LOG = logging.getLogger('nova.tests.api.openstack.vsa') last_param = {} @@ -103,7 +111,7 @@ class VSAApiTest(test.TestCase): self.stubs.UnsetAll() super(VSAApiTest, self).tearDown() - def test_vsa_api_create(self): + def test_vsa_create(self): global last_param last_param = {} @@ -128,7 +136,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp_dict['vsa']['displayDescription'], vsa['displayDescription']) - def test_vsa_api_create_no_body(self): + def test_vsa_create_no_body(self): req = webob.Request.blank('/v1.1/zadr-vsa') req.method = 'POST' req.body = json.dumps({}) @@ -137,7 +145,7 @@ class VSAApiTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 422) - def test_vsa_api_delete(self): + def test_vsa_delete(self): global last_param last_param = {} @@ -149,7 +157,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp.status_int, 200) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) - def test_vsa_api_delete_invalid_id(self): + def test_vsa_delete_invalid_id(self): global last_param last_param = {} @@ -161,7 +169,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp.status_int, 404) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) - def test_vsa_api_show(self): + def test_vsa_show(self): global last_param last_param = {} @@ -176,7 +184,7 @@ class VSAApiTest(test.TestCase): self.assertTrue('vsa' in resp_dict) self.assertEqual(resp_dict['vsa']['id'], str(vsa_id)) - def test_vsa_api_show_invalid_id(self): + def test_vsa_show_invalid_id(self): global last_param 
last_param = {} @@ -187,7 +195,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp.status_int, 404) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) - def test_vsa_api_index(self): + def test_vsa_index(self): req = webob.Request.blank('/v1.1/zadr-vsa') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) @@ -202,7 +210,7 @@ class VSAApiTest(test.TestCase): resp_vsa = resp_vsas.pop() self.assertEqual(resp_vsa['id'], 123) - def test_vsa_api_detail(self): + def test_vsa_detail(self): req = webob.Request.blank('/v1.1/zadr-vsa/detail') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) @@ -218,22 +226,216 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp_vsa['id'], 123) -class VSAVolumeDriveApiTest(test.TestCase): - def setUp(self): - super(VSAVolumeDriveApiTest, self).setUp() +def _get_default_volume_param(): + return { + 'id': 123, + 'status': 'available', + 'size': 100, + 'availability_zone': 'nova', + 'created_at': None, + 'attach_status': 'detached', + 'display_name': 'Default vol name', + 'display_description': 'Default vol description', + 'from_vsa_id': None, + 'to_vsa_id': None, + } + + +def stub_volume_create(self, context, size, snapshot_id, name, description, + **param): + LOG.debug(_("_create: param=%s"), size) + vol = _get_default_volume_param() + for k, v in param.iteritems(): + vol[k] = v + vol['size'] = size + vol['display_name'] = name + vol['display_description'] = description + return vol + + +def stub_volume_update(self, context, **param): + LOG.debug(_("_volume_update: param=%s"), param) + pass + + +def stub_volume_delete(self, context, **param): + LOG.debug(_("_volume_delete: param=%s"), param) + pass + + +def stub_volume_get(self, context, volume_id): + LOG.debug(_("_volume_get: volume_id=%s"), volume_id) + vol = _get_default_volume_param() + vol['id'] = volume_id + if volume_id == '234': + vol['from_vsa_id'] = 123 + if volume_id == '345': + vol['to_vsa_id'] = 123 + return vol + + +def stub_volume_get_notfound(self, context, volume_id): + raise exception.NotFound + + +def stub_volume_get_all_by_vsa(self, context, vsa_id, direction): + vol = stub_volume_get(self, context, '123') + vol['%s_vsa_id' % direction] = vsa_id + return [vol] + + +def return_vsa(context, vsa_id): + return {'id': vsa_id} + + +class VSAVolumeApiTest(test.TestCase): + + def setUp(self, test_obj=None, test_objs=None): + super(VSAVolumeApiTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) - self.stubs.Set(vsa.api.API, "create", stub_vsa_create) - self.stubs.Set(vsa.api.API, "delete", stub_vsa_delete) - self.stubs.Set(vsa.api.API, "get", stub_vsa_get) - self.stubs.Set(vsa.api.API, "get_all", stub_vsa_get_all) + self.stubs.Set(nova.db.api, 'vsa_get', return_vsa) + + self.stubs.Set(volume.api.API, "create", stub_volume_create) + self.stubs.Set(volume.api.API, "update", stub_volume_update) + self.stubs.Set(volume.api.API, "delete", stub_volume_delete) + self.stubs.Set(volume.api.API, "get_all_by_vsa", + stub_volume_get_all_by_vsa) + self.stubs.Set(volume.api.API, "get", stub_volume_get) self.context = context.get_admin_context() + self.test_obj = test_obj if test_obj else "volume" + self.test_objs = test_objs if test_objs else "volumes" + + def tearDown(self): + self.stubs.UnsetAll() + super(VSAVolumeApiTest, self).tearDown() + + def test_vsa_volume_create(self): + vol = 
{"size": 100, + "displayName": "VSA Volume Test Name", + "displayDescription": "VSA Volume Test Desc"} + body = {self.test_obj: vol} + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + resp = req.get_response(fakes.wsgi_app()) + + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 200) + + resp_dict = json.loads(resp.body) + self.assertTrue(self.test_obj in resp_dict) + self.assertEqual(resp_dict[self.test_obj]['size'], + vol['size']) + self.assertEqual(resp_dict[self.test_obj]['displayName'], + vol['displayName']) + self.assertEqual(resp_dict[self.test_obj]['displayDescription'], + vol['displayDescription']) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_create_no_body(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req.method = 'POST' + req.body = json.dumps({}) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 422) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_index(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_detail(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/detail' % \ + self.test_objs) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + + def test_vsa_volume_show_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_show_no_volume(self): + self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 404) + + def test_vsa_volume_update(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + update = {"status": "available"} + body = {self.test_obj: update} + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'PUT' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete(self): + obj_num = 234 if self.test_objs == "volumes" else 345 + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + (self.test_objs, obj_num)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 202) + else: + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete_no_vsa_assignment(self): + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_vsa_volume_delete_no_volume(self): + 
self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) + + req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + (self.test_objs)) + req.method = 'DELETE' + resp = req.get_response(fakes.wsgi_app()) + if self.test_obj == "volume": + self.assertEqual(resp.status_int, 404) + else: + self.assertEqual(resp.status_int, 400) + + +class VSADriveApiTest(VSAVolumeApiTest): + def setUp(self): + super(VSADriveApiTest, self).setUp(test_obj="drive", + test_objs="drives") def tearDown(self): self.stubs.UnsetAll() - super(VSAVolumeDriveApiTest, self).tearDown() + super(VSADriveApiTest, self).tearDown() diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index c0d808cd3..041eacb2d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -99,7 +99,6 @@ def get_network_info(instance): if network['dns2']: mapping['dns'].append(network['dns2']) - if FLAGS.use_ipv6: mapping['ip6s'] = [ip6_dict()] mapping['gateway6'] = network['gateway_v6'] -- cgit From fb755ae05b0b6a7b3701614c8d702e8a24ff380c Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Sun, 24 Jul 2011 00:07:00 -0700 Subject: some cosmetic changes. Prior to merge proposal --- nova/tests/test_vsa.py | 185 +++++++++++++++++++++++++++++++++++++++++++++++++ nova/vsa/api.py | 44 +++++++----- 2 files changed, 212 insertions(+), 17 deletions(-) create mode 100644 nova/tests/test_vsa.py diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py new file mode 100644 index 000000000..859fe3325 --- /dev/null +++ b/nova/tests/test_vsa.py @@ -0,0 +1,185 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import stubout +import base64 + +from xml.etree import ElementTree +from xml.etree.ElementTree import Element, SubElement + +from nova import exception +from nova import flags +from nova import vsa +from nova import db +from nova import context +from nova import test +from nova import log as logging +import nova.image.fake + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.vsa') + + +def fake_drive_type_get_by_name(context, name): + drive_type = { + 'id': 1, + 'name': name, + 'type': name.split('_')[0], + 'size_gb': int(name.split('_')[1]), + 'rpm': name.split('_')[2], + 'capabilities': '', + 'visible': True} + return drive_type + + +class VsaTestCase(test.TestCase): + + def setUp(self): + super(VsaTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.vsa_api = vsa.API() + + self.context_non_admin = context.RequestContext(None, None) + self.context = context.get_admin_context() + + def fake_show_by_name(meh, context, name): + if name == 'wrong_image_name': + LOG.debug(_("Test: Emulate wrong VSA name. 
Raise")) + raise exception.ImageNotFound + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} + + self.stubs.Set(nova.image.fake._FakeImageService, 'show_by_name', + fake_show_by_name) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaTestCase, self).tearDown() + + def test_vsa_create_delete_defaults(self): + param = {'display_name': 'VSA name test'} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['display_name'], param['display_name']) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_delete_check_in_db(self): + vsa_list1 = self.vsa_api.get_all(self.context) + vsa_ref = self.vsa_api.create(self.context) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + + self.vsa_api.delete(self.context, vsa_ref['id']) + vsa_list3 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list3), len(vsa_list2) - 1) + + def test_vsa_create_delete_high_vc_count(self): + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_wrong_image_name(self): + param = {'image_name': 'wrong_image_name'} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_db_error(self): + + def fake_vsa_create(context, options): + LOG.debug(_("Test: Emulate DB error. Raise")) + raise exception.Error + + self.stubs.Set(nova.db.api, 'vsa_create', fake_vsa_create) + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context) + + def test_vsa_create_wrong_storage_params(self): + vsa_list1 = self.vsa_api.get_all(self.context) + param = {'storage': [{'stub': 1}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + vsa_list2 = self.vsa_api.get_all(self.context) + self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + + param = {'storage': [{'drive_name': 'wrong name'}]} + self.assertRaises(exception.ApiError, + self.vsa_api.create, self.context, **param) + + def test_vsa_create_with_storage(self, multi_vol_creation=True): + """Test creation of VSA with BE storage""" + + FLAGS.vsa_multi_vol_creation = multi_vol_creation + + self.stubs.Set(nova.vsa.drive_types, 'get_by_name', + fake_drive_type_get_by_name) + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 3) + self.vsa_api.delete(self.context, vsa_ref['id']) + + param = {'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}], + 'shared': True} + vsa_ref = self.vsa_api.create(self.context, **param) + self.assertEqual(vsa_ref['vol_count'], 15) + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_create_with_storage_single_volumes(self): + self.test_vsa_create_with_storage(multi_vol_creation=False) + + def test_vsa_update(self): + vsa_ref = self.vsa_api.create(self.context) + + param = {'vc_count': FLAGS.max_vcs_in_vsa + 1} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], FLAGS.max_vcs_in_vsa) + + param = {'vc_count': 2} + vsa_ref = self.vsa_api.update(self.context, vsa_ref['id'], **param) + self.assertEqual(vsa_ref['vc_count'], 2) + + self.vsa_api.delete(self.context, vsa_ref['id']) + + def test_vsa_generate_user_data(self): + 
self.stubs.Set(nova.vsa.drive_types, 'get_by_name', + fake_drive_type_get_by_name) + + FLAGS.vsa_multi_vol_creation = False + param = {'display_name': 'VSA name test', + 'display_description': 'VSA desc test', + 'vc_count': 2, + 'storage': [{'drive_name': 'SATA_500_7200', + 'num_drives': 3}]} + vsa_ref = self.vsa_api.create(self.context, **param) + volumes = db.volume_get_all_assigned_to_vsa(self.context, + vsa_ref['id']) + + user_data = self.vsa_api.generate_user_data(self.context, + vsa_ref, + volumes) + user_data = base64.b64decode(user_data) + + LOG.debug(_("Test: user_data = %s"), user_data) + + elem = ElementTree.fromstring(user_data) + self.assertEqual(elem.findtext('name'), + param['display_name']) + self.assertEqual(elem.findtext('description'), + param['display_description']) + self.assertEqual(elem.findtext('vc_count'), + str(param['vc_count'])) + + self.vsa_api.delete(self.context, vsa_ref['id']) diff --git a/nova/vsa/api.py b/nova/vsa/api.py index b366b6587..80637cc9e 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -74,15 +74,15 @@ class API(base.Base): num_disks = node.get('num_drives', 1) if name is None: - raise exception.ApiError(_("No drive_name param found in %s"), - node) + raise exception.ApiError(_("No drive_name param found in %s") + % node) # find DB record for this disk try: drive_ref = drive_types.get_by_name(context, name) except exception.NotFound: - raise exception.ApiError(_("Invalid drive type name %s"), - name) + raise exception.ApiError(_("Invalid drive type name %s") + % name) # if size field present - override disk size specified in DB size = node.get('size', drive_ref['size_gb']) @@ -149,8 +149,8 @@ class API(base.Base): vc_image = image_service.show_by_name(context, image_name) vc_image_href = vc_image['id'] except exception.ImageNotFound: - raise exception.ApiError(_("Failed to find configured image %s"), - image_name) + raise exception.ApiError(_("Failed to find configured image %s") + % image_name) options = { 'display_name': display_name, @@ -258,34 +258,42 @@ class API(base.Base): """ LOG.info(_("VSA ID %(vsa_id)d: Update VSA call"), locals()) + updatable_fields = ['status', 'vc_count', 'vol_count', + 'display_name', 'display_description'] + changes = {} + for field in updatable_fields: + if field in kwargs: + changes[field] = kwargs[field] + vc_count = kwargs.get('vc_count', None) if vc_count is not None: # VP-TODO: This request may want to update number of VCs # Get number of current VCs and add/delete VCs appropriately vsa = self.get(context, vsa_id) vc_count = int(vc_count) + if vc_count > FLAGS.max_vcs_in_vsa: + LOG.warning(_("Requested number of VCs (%d) is too high."\ + " Setting to default"), vc_count) + vc_count = FLAGS.max_vcs_in_vsa + if vsa['vc_count'] != vc_count: self.update_num_vcs(context, vsa, vc_count) + changes['vc_count'] = vc_count - return self.db.vsa_update(context, vsa_id, kwargs) + return self.db.vsa_update(context, vsa_id, changes) def update_num_vcs(self, context, vsa, vc_count): - if vc_count > FLAGS.max_vcs_in_vsa: - LOG.warning(_("Requested number of VCs (%d) is too high."\ - " Setting to default"), vc_count) - vc_count = FLAGS.max_vcs_in_vsa - vsa_name = vsa['name'] - old_vc_count = vsa['vc_count'] + old_vc_count = int(vsa['vc_count']) if vc_count > old_vc_count: add_cnt = vc_count - old_vc_count - LOG.debug(_("Adding %(add_cnt)d VCs to VSA %(vsa_name)s."), + LOG.debug(_("Adding %(add_cnt)s VCs to VSA %(vsa_name)s."), locals()) # VP-TODO: actual code for adding new VCs elif vc_count < old_vc_count: del_cnt = 
old_vc_count - vc_count - LOG.debug(_("Deleting %(add_cnt)d VCs from VSA %(vsa_name)s."), + LOG.debug(_("Deleting %(del_cnt)s VCs from VSA %(vsa_name)s."), locals()) # VP-TODO: actual code for deleting extra VCs @@ -372,9 +380,11 @@ class API(base.Base): e_vsa_detail = SubElement(e_vsa, "vc_count") e_vsa_detail.text = str(vsa['vc_count']) e_vsa_detail = SubElement(e_vsa, "auth_user") - e_vsa_detail.text = str(context.user.name) + if context.user is not None: + e_vsa_detail.text = str(context.user.name) e_vsa_detail = SubElement(e_vsa, "auth_access_key") - e_vsa_detail.text = str(context.user.access) + if context.user is not None: + e_vsa_detail.text = str(context.user.access) e_volumes = SubElement(e_vsa, "volumes") for volume in volumes: -- cgit From a719befe3e28994c02aab70e4b0e1871b318d971 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Sun, 24 Jul 2011 00:24:31 -0700 Subject: some file attrib changes --- bin/nova-logspool | 0 bin/nova-spoolsentry | 0 contrib/nova.sh | 0 plugins/xenserver/xenapi/etc/xapi.d/plugins/agent | 0 tools/clean-vlans | 0 tools/nova-debug | 0 6 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 bin/nova-logspool mode change 100755 => 100644 bin/nova-spoolsentry mode change 100644 => 100755 contrib/nova.sh mode change 100644 => 100755 plugins/xenserver/xenapi/etc/xapi.d/plugins/agent mode change 100644 => 100755 tools/clean-vlans mode change 100644 => 100755 tools/nova-debug diff --git a/bin/nova-logspool b/bin/nova-logspool old mode 100755 new mode 100644 diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry old mode 100755 new mode 100644 diff --git a/contrib/nova.sh b/contrib/nova.sh old mode 100644 new mode 100755 diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent old mode 100644 new mode 100755 diff --git a/tools/clean-vlans b/tools/clean-vlans old mode 100644 new mode 100755 diff --git a/tools/nova-debug b/tools/nova-debug old mode 100644 new mode 100755 -- cgit From c500eac4589e9cb22e5e71b900164a151290ec03 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Mon, 25 Jul 2011 16:26:23 -0700 Subject: some cleanup. VSA flag status changes. 
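For reference, the generate_user_data() hunk above assembles the VC boot payload as XML, and the test in test_vsa.py shows it is base64-encoded before delivery. A minimal sketch of how a consumer could decode that payload — not part of the patch series; only the element names visible in the diffs (name, description, vc_count) are assumed, and the helper name is hypothetical:

import base64
from xml.etree import ElementTree


def parse_vsa_user_data(user_data_b64):
    # Decode and parse the <vsa> document; element names follow the
    # SubElement calls in generate_user_data() above.
    xml_doc = base64.b64decode(user_data_b64)
    elem = ElementTree.fromstring(xml_doc)
    return {'name': elem.findtext('name'),
            'description': elem.findtext('description'),
            'vc_count': int(elem.findtext('vc_count') or 0)}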
From c500eac4589e9cb22e5e71b900164a151290ec03 Mon Sep 17 00:00:00 2001
From: "vladimir.p"
Date: Mon, 25 Jul 2011 16:26:23 -0700
Subject: some cleanup. VSA flag status changes. returned some files

---
 bin/nova-vsa                                       |  8 ++------
 nova/CA/newcerts/.placeholder                      |  0
 nova/CA/private/.placeholder                       |  0
 nova/CA/projects/.gitignore                        |  1 +
 nova/CA/projects/.placeholder                      |  0
 nova/CA/reqs/.gitignore                            |  1 +
 nova/CA/reqs/.placeholder                          |  0
 nova/api/ec2/cloud.py                              |  1 -
 nova/api/openstack/contrib/drive_types.py          |  2 +-
 .../openstack/contrib/virtual_storage_arrays.py    | 19 +++++++++++--------
 .../migrate_repo/versions/036_add_vsa_data.py      |  1 +
 nova/flags.py                                      | 14 --------------
 nova/scheduler/vsa.py                              |  6 ++++--
 nova/tests/api/openstack/contrib/test_vsa.py       |  4 +++-
 nova/tests/test_drive_types.py                     |  1 +
 nova/volume/driver.py                              | 21 +++------------------
 nova/vsa/__init__.py                               |  1 +
 nova/vsa/api.py                                    | 22 ++++++++++++++++------
 nova/vsa/connection.py                             |  1 +
 nova/vsa/drive_types.py                            |  1 +
 nova/vsa/fake.py                                   |  1 +
 nova/vsa/manager.py                                | 12 ++++++------
 22 files changed, 54 insertions(+), 63 deletions(-)
 create mode 100644 nova/CA/newcerts/.placeholder
 create mode 100644 nova/CA/private/.placeholder
 create mode 100644 nova/CA/projects/.gitignore
 create mode 100644 nova/CA/projects/.placeholder
 create mode 100644 nova/CA/reqs/.gitignore
 create mode 100644 nova/CA/reqs/.placeholder

diff --git a/bin/nova-vsa b/bin/nova-vsa
index b15b7c7ed..a67fe952d 100755
--- a/bin/nova-vsa
+++ b/bin/nova-vsa
@@ -1,8 +1,8 @@
 #!/usr/bin/env python
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,10 +18,6 @@
 # under the License.
 
 """Starter script for Nova VSA."""
-
-import eventlet
-eventlet.monkey_patch()
-
 import gettext
 import os
 import sys
diff --git a/nova/CA/newcerts/.placeholder b/nova/CA/newcerts/.placeholder
new file mode 100644
index 000000000..e69de29bb
diff --git a/nova/CA/private/.placeholder b/nova/CA/private/.placeholder
new file mode 100644
index 000000000..e69de29bb
diff --git a/nova/CA/projects/.gitignore b/nova/CA/projects/.gitignore
new file mode 100644
index 000000000..72e8ffc0d
--- /dev/null
+++ b/nova/CA/projects/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/nova/CA/projects/.placeholder b/nova/CA/projects/.placeholder
new file mode 100644
index 000000000..e69de29bb
diff --git a/nova/CA/reqs/.gitignore b/nova/CA/reqs/.gitignore
new file mode 100644
index 000000000..72e8ffc0d
--- /dev/null
+++ b/nova/CA/reqs/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/nova/CA/reqs/.placeholder b/nova/CA/reqs/.placeholder
new file mode 100644
index 000000000..e69de29bb
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 56a5850f6..6fc74c92a 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -902,7 +902,6 @@ class CloudController(object):
         image_name = kwargs.get('image_name')
         availability_zone = kwargs.get('placement', {}).get(
                                        'AvailabilityZone')
-        #storage = ast.literal_eval(kwargs.get('storage', '[]'))
         storage = kwargs.get('storage', [])
         shared = kwargs.get('shared', False)
diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py
index 590eaaec0..6454fd81f 100644
--- a/nova/api/openstack/contrib/drive_types.py
+++ b/nova/api/openstack/contrib/drive_types.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -118,7 +119,6 @@
             drive_types.delete(context, id)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
-        # return exc.HTTPAccepted()
 
 
 class Drive_types(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py
index 6139b494e..68a00fd7d 100644
--- a/nova/api/openstack/contrib/virtual_storage_arrays.py
+++ b/nova/api/openstack/contrib/virtual_storage_arrays.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -153,7 +154,6 @@ class VsaController(object):
             self.vsa_api.delete(context, vsa_id=id)
         except exception.NotFound:
             return faults.Fault(exc.HTTPNotFound())
-        # return exc.HTTPAccepted()
 
 
 class VsaVolumeDriveController(volumes.VolumeController):
@@ -193,6 +193,7 @@ class VsaVolumeDriveController(volumes.VolumeController):
 
         d = translation(context, vol)
         d['vsaId'] = vol[self.direction]
+        d['name'] = vol['name']
         return d
 
     def _check_volume_ownership(self, context, vsa_id, id):
@@ -265,15 +266,17 @@ class VsaVolumeDriveController(volumes.VolumeController):
             return faults.Fault(exc.HTTPBadRequest())
 
         vol = body[self.object]
-        updatable_fields = ['display_name',
-                            'display_description',
-                            'status',
-                            'provider_location',
-                            'provider_auth']
+        updatable_fields = [{'displayName': 'display_name'},
+                            {'displayDescription': 'display_description'},
+                            {'status': 'status'},
+                            {'providerLocation': 'provider_location'},
+                            {'providerAuth': 'provider_auth'}]
         changes = {}
         for field in updatable_fields:
-            if field in vol:
-                changes[field] = vol[field]
+            key = field.keys()[0]
+            val = field[key]
+            if key in vol:
+                changes[val] = vol[key]
 
         obj = self.object
         LOG.audit(_("Update %(obj)s with id: %(id)s, changes: %(changes)s"),
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py
index 7fc8f955c..5d2e56a7e 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/nova/flags.py b/nova/flags.py
index 8000eac4a..9f5965919 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -378,20 +378,6 @@ DEFINE_integer('max_vcs_in_vsa', 32,
 DEFINE_integer('vsa_part_size_gb', 100,
                'default partition size for shared capacity')
 
-DEFINE_string('vsa_status_creating', 'creating',
-              'VSA creating (not ready yet)')
-DEFINE_string('vsa_status_launching', 'launching',
-              'Launching VCs (all BE volumes were created)')
-DEFINE_string('vsa_status_created', 'created',
-              'VSA fully created and ready for use')
-DEFINE_string('vsa_status_partial', 'partial',
-              'Some BE storage allocations failed')
-DEFINE_string('vsa_status_failed', 'failed',
-              'Some BE storage allocations failed')
-DEFINE_string('vsa_status_deleting', 'deleting',
-              'VSA started the deletion procedure')
-
-
 # The service to use for image search and retrieval
 DEFINE_string('image_service', 'nova.image.glance.GlanceImageService',
               'The service to use for retrieving and searching for images.')
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
index 059afce68..6931afc2b 100644
--- a/nova/scheduler/vsa.py
+++ b/nova/scheduler/vsa.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -24,6 +25,7 @@ from nova import rpc
 from nova import db
 from nova import flags
 from nova import utils
+from nova.vsa.api import VsaState
 from nova.volume import api as volume_api
 from nova.scheduler import driver
 from nova.scheduler import simple
@@ -301,7 +303,7 @@ class VsaScheduler(simple.SimpleScheduler):
         except:
             if vsa_id:
                 db.vsa_update(context, vsa_id,
-                              dict(status=FLAGS.vsa_status_failed))
+                              dict(status=VsaState.FAILED))
 
             for vol in volume_params:
                 if 'capabilities' in vol:
@@ -346,7 +348,7 @@ class VsaScheduler(simple.SimpleScheduler):
         except:
             if volume_ref['to_vsa_id']:
                 db.vsa_update(context, volume_ref['to_vsa_id'],
-                              dict(status=FLAGS.vsa_status_failed))
+                              dict(status=VsaState.FAILED))
             raise
 
         #return super(VsaScheduler, self).schedule_create_volume(context,
         #        volume_id, *_args, **_kwargs)
diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py
index c3150fa9c..3c9136e14 100644
--- a/nova/tests/api/openstack/contrib/test_vsa.py
+++ b/nova/tests/api/openstack/contrib/test_vsa.py
@@ -234,6 +234,7 @@ def _get_default_volume_param():
         'availability_zone': 'nova',
         'created_at': None,
         'attach_status': 'detached',
+        'name': 'vol name',
         'display_name': 'Default vol name',
         'display_description': 'Default vol description',
         'from_vsa_id': None,
@@ -386,7 +387,8 @@ class VSAVolumeApiTest(test.TestCase):
 
     def test_vsa_volume_update(self):
         obj_num = 234 if self.test_objs == "volumes" else 345
-        update = {"status": "available"}
+        update = {"status": "available",
+                  "displayName": "Test Display name"}
         body = {self.test_obj: update}
         req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \
                                   (self.test_objs, obj_num))
diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py
index 8534bcde5..e91c41321 100644
--- a/nova/tests/test_drive_types.py
+++ b/nova/tests/test_drive_types.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index b93fc1d92..2e3da57b2 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -507,15 +507,7 @@ class ISCSIDriver(VolumeDriver):
         iscsi_properties = self._get_iscsi_properties(volume)
 
         if not iscsi_properties['target_discovered']:
-            # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept
-            # multiple args for iscsi-command. Like in --op new. Hence
-            # using a local version here which does the same thing
-            (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'new',
-                                       '-m', 'node',
-                                       '-T', iscsi_properties['target_iqn'],
-                                       '-p', iscsi_properties['target_portal'])
-            # self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
-            # zadara-end
+            self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
 
         if iscsi_properties.get('auth_method'):
             self._iscsiadm_update(iscsi_properties,
@@ -567,15 +559,7 @@ class ISCSIDriver(VolumeDriver):
         iscsi_properties = self._get_iscsi_properties(volume)
         self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
         self._run_iscsiadm(iscsi_properties, "--logout")
-        # zadara-begin: Bug in cactus. _run_iscsiadm() cannot accept
-        # multiple args for iscsi-command. Like in --op delete. Hence
-        # using a local version here which does the same thing
-        (out, err) = self._execute('sudo', 'iscsiadm', '--op', 'delete',
-                                   '-m', 'node',
-                                   '-T', iscsi_properties['target_iqn'],
-                                   '-p', iscsi_properties['target_portal'])
-        #self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
-        # zadara-end
+        self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
 
     def check_for_export(self, context, volume_id):
         """Make sure volume is exported."""
@@ -916,6 +900,7 @@ class ZadaraBEDriver(ISCSIDriver):
             ret = self._common_be_export(context, volume, iscsi_target)
         except:
             raise exception.ProcessExecutionError
+        return ret
 
     def remove_export(self, context, volume):
         """Removes BE export for a volume."""
diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py
index a94a6b7a4..779b7fb65 100644
--- a/nova/vsa/__init__.py
+++ b/nova/vsa/__init__.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index 80637cc9e..99793efa3 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,12 +20,10 @@
 Handles all requests relating to Virtual Storage Arrays (VSAs).
 """
 
-#import datetime
 import sys
 import base64
 
 from xml.etree import ElementTree
-from xml.etree.ElementTree import Element, SubElement
 
 from nova import db
 from nova import exception
@@ -47,6 +46,15 @@ flags.DEFINE_boolean('vsa_multi_vol_creation', True,
 LOG = logging.getLogger('nova.vsa')
 
 
+class VsaState:
+    CREATING = 'creating'       # VSA creating (not ready yet)
+    LAUNCHING = 'launching'     # Launching VCs (all BE volumes were created)
+    CREATED = 'created'         # VSA fully created and ready for use
+    PARTIAL = 'partial'         # Some BE storage allocations failed
+    FAILED = 'failed'           # VSA creation failed
+    DELETING = 'deleting'       # VSA started the deletion procedure
+
+
 class API(base.Base):
     """API for interacting with the VSA manager."""
 
@@ -160,7 +168,7 @@ class API(base.Base):
             'instance_type_id': instance_type['id'],
             'image_ref': vc_image_href,
             'vc_count': vc_count,
-            'status': FLAGS.vsa_status_creating,
+            'status': VsaState.CREATING,
             }
         LOG.info(_("Creating VSA: %s") % options)
 
@@ -178,7 +186,7 @@ class API(base.Base):
                                             storage, shared)
         except exception.ApiError:
             self.update_vsa_status(context, vsa_id,
-                                   status=FLAGS.vsa_status_failed)
+                                   status=VsaState.FAILED)
             raise
 
         # after creating DB entry, re-check and set some defaults
@@ -227,7 +235,7 @@ class API(base.Base):
                                 availability_zone=availability_zone)
         except:
             self.update_vsa_status(context, vsa_id,
-                                   status=FLAGS.vsa_status_partial)
+                                   status=VsaState.PARTIAL)
             raise
 
         if len(volume_params) == 0:
@@ -369,7 +377,9 @@ class API(base.Base):
         return self.db.vsa_get_all_by_project(context, context.project_id)
 
     def generate_user_data(self, context, vsa, volumes):
-        e_vsa = Element("vsa")
+        SubElement = ElementTree.SubElement
+
+        e_vsa = ElementTree.Element("vsa")
 
         e_vsa_detail = SubElement(e_vsa, "id")
         e_vsa_detail.text = str(vsa['id'])
diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py
index 6c61acee4..5de8021a7 100644
--- a/nova/vsa/connection.py
+++ b/nova/vsa/connection.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py
index 5bec96047..86ff76b96 100644
--- a/nova/vsa/drive_types.py
+++ b/nova/vsa/drive_types.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py
index 308d21fec..d96138255 100644
--- a/nova/vsa/fake.py
+++ b/nova/vsa/fake.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py
index c67358672..1390f8146 100644
--- a/nova/vsa/manager.py
+++ b/nova/vsa/manager.py
@@ -1,6 +1,7 @@
 # vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright (c) 2011 Zadara Storage Inc.
+# Copyright (c) 2011 OpenStack LLC.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -30,6 +31,7 @@ from nova import exception
 from nova import compute
 from nova import volume
 from nova import vsa
+from nova.vsa.api import VsaState
 from nova.compute import instance_types
 
 
@@ -114,9 +116,9 @@ class VsaManager(manager.SchedulerDependentManager):
         """Start VCs for VSA """
         vsa_id = vsa['id']
 
-        if vsa['status'] == FLAGS.vsa_status_creating:
+        if vsa['status'] == VsaState.CREATING:
             self.vsa_api.update_vsa_status(context, vsa_id,
-                                           FLAGS.vsa_status_launching)
+                                           VsaState.LAUNCHING)
         else:
             return
 
@@ -144,8 +146,7 @@ class VsaManager(manager.SchedulerDependentManager):
         if has_failed_volumes:
             LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals())
             self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True)
-            self.vsa_api.update_vsa_status(context, vsa_id,
-                                           FLAGS.vsa_status_failed)
+            self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED)
             return
 
         # create user-data record for VC
@@ -170,5 +171,4 @@ class VsaManager(manager.SchedulerDependentManager):
                 user_data=storage_data,
                 vsa_id=vsa_id)
 
-        self.vsa_api.update_vsa_status(context, vsa_id,
-                                       FLAGS.vsa_status_created)
+        self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED)
--
cgit
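The commit above replaces the six vsa_status_* flags with the VsaState class, so a status is now a plain module-level string constant instead of a configurable FLAGS value. A minimal sketch of the resulting call pattern, mirroring the db.vsa_update() calls in the scheduler and manager hunks — the is_settled() helper is illustrative and not defined by the patch:

from nova.vsa.api import VsaState


def mark_vsa_failed(db, context, vsa_id):
    # Same call shape as the scheduler/manager hunks above.
    db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))


def is_settled(status):
    # Hypothetical helper: the non-transient states once creation has
    # finished one way or the other.
    return status in (VsaState.CREATED, VsaState.PARTIAL, VsaState.FAILED)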
From a0a3f0157d6f4e8563a5a1e4ee1bde92388f25fc Mon Sep 17 00:00:00 2001
From: "vladimir.p"
Date: Mon, 25 Jul 2011 16:58:09 -0700
Subject: volume name change. some cleanup

---
 nova/db/sqlalchemy/models.py | 20 --------------------
 nova/flags.py                |  1 -
 nova/vsa/api.py              | 12 ++++++++----
 3 files changed, 8 insertions(+), 25 deletions(-)

diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index fbc8e9e19..42b97867d 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -279,14 +279,6 @@ class VirtualStorageArray(BASE, NovaBase):
     vol_count = Column(Integer, default=0)   # total number of BE volumes
     status = Column(String(255))
 
-    #admin_pass = Column(String(255))
-
-    #disks = relationship(VsaDiskAssociation,
-    #                 backref=backref('vsa', uselist=False),
-    #                 foreign_keys=id,
-    #                 primaryjoin='and_(VsaDiskAssociation.vsa_id == '
-    #                             'VirtualStorageArray.id)')
-
 
 class InstanceActions(BASE, NovaBase):
     """Represents a guest VM's actions and results"""
@@ -401,18 +393,6 @@ class DriveTypes(BASE, NovaBase):
                           primaryjoin='and_(Volume.drive_type_id == '
                                       'DriveTypes.id)')
 
-#
-#class VsaDiskAssociation(BASE, NovaBase):
-#    """associates drive types with Virtual Storage Arrays."""
-#    __tablename__ = 'vsa_disk_association'
-#
-#    id = Column(Integer, primary_key=True, autoincrement=True)
-#
-#    drive_type_id = Column(Integer, ForeignKey('drive_types.id'))
-#    vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id'))
-#
-#    disk_num = Column(Integer, nullable=False)    # number of disks
-
 
 class Quota(BASE, NovaBase):
     """Represents a single quota override for a project.
diff --git a/nova/flags.py b/nova/flags.py
index 9f5965919..c192b5281 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -369,7 +369,6 @@ DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager',
               'Manager for vsa')
 DEFINE_string('vc_image_name', 'vc_image',
               'the VC image ID (for a VC image that exists in DB Glance)')
-#---------------------------------------------------------------------
 # VSA constants and enums
 DEFINE_string('default_vsa_instance_type', 'm1.small',
               'default instance type for VSA instances')
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index 99793efa3..9b2750d82 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -67,7 +67,8 @@ class API(base.Base):
         return instance_types.get_instance_type_by_name(
                                 FLAGS.default_vsa_instance_type)
 
-    def _check_storage_parameters(self, context, vsa_name, storage, shared):
+    def _check_storage_parameters(self, context, vsa_name, storage,
+                                  shared, first_index=0):
         """
         Translates storage array of disks to the list of volumes
         :param storage: List of dictionaries with following keys:
@@ -105,13 +106,16 @@ class API(base.Base):
                 size = 0    # special handling for full drives
 
             for i in range(num_volumes):
-                # VP-TODO: potentialy may conflict with previous volumes
-                volume_name = vsa_name + ("_%s_vol-%d" % (name, i))
+                # volume_name = vsa_name + ("_%s_vol-%d" % (name, i))
+                volume_name = "drive-%03d" % first_index
+                first_index += 1
+                volume_desc = 'BE volume for VSA %s type %s' % \
+                              (vsa_name, name)
                 volume = {
                     'size': size,
                     'snapshot_id': None,
                     'name': volume_name,
-                    'description': 'BE volume for ' + volume_name,
+                    'description': volume_desc,
                     'drive_ref': drive_ref
                     }
                 volume_params.append(volume)
--
cgit
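The vsa/api.py hunk above switches BE volume names from the VSA-name-derived scheme (which the removed VP-TODO notes could collide) to sequential, zero-padded "drive-NNN" names driven by first_index. A standalone sketch of the same numbering, under the assumption that a single counter spans all drive types within one create call:

def be_volume_names(storage, first_index=0):
    # storage: list of (drive_name, num_volumes) pairs; uses the same
    # "drive-%03d" % first_index pattern as the hunk above.
    names = []
    for drive_name, num_volumes in storage:
        for _ in range(num_volumes):
            names.append("drive-%03d" % first_index)
            first_index += 1
    return names

# be_volume_names([('SATA_500_7200', 2), ('SAS_1000', 1)])
# -> ['drive-000', 'drive-001', 'drive-002']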
From a72f2e29e2a35791a1c53f4f606948572ab52280 Mon Sep 17 00:00:00 2001
From: "vladimir.p"
Date: Tue, 26 Jul 2011 13:25:34 -0700
Subject: VSA volume creation/deletion changes

---
 nova/db/sqlalchemy/api.py      |   1 +
 nova/tests/test_vsa.py         |   5 +-
 nova/tests/test_vsa_volumes.py | 108 +++++++++++++++++++++++++++++++++++++++++
 nova/volume/api.py             |  12 ++++-
 4 files changed, 122 insertions(+), 4 deletions(-)
 create mode 100644 nova/tests/test_vsa_volumes.py

diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 3b14f114a..50037e259 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -2205,6 +2205,7 @@ def volume_update(context, volume_id, values):
         volume_ref = volume_get(context, volume_id, session=session)
         volume_ref.update(values)
         volume_ref.save(session=session)
+        return volume_ref
 
 
 ###################
diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py
index 859fe3325..8e4d58960 100644
--- a/nova/tests/test_vsa.py
+++ b/nova/tests/test_vsa.py
@@ -60,8 +60,9 @@ class VsaTestCase(test.TestCase):
                 raise exception.ImageNotFound
             return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
 
-        self.stubs.Set(nova.image.fake._FakeImageService, 'show_by_name',
-                       fake_show_by_name)
+        self.stubs.Set(nova.image.fake._FakeImageService,
+                       'show_by_name',
+                       fake_show_by_name)
 
     def tearDown(self):
         self.stubs.UnsetAll()
diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py
new file mode 100644
index 000000000..0facd3b1b
--- /dev/null
+++ b/nova/tests/test_vsa_volumes.py
@@ -0,0 +1,108 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+from nova import exception
+from nova import flags
+from nova import vsa
+from nova import volume
+from nova import db
+from nova import context
+from nova import test
+from nova import log as logging
+import nova.image.fake
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.vsa.volumes')
+
+
+def _default_volume_param():
+    return {
+        'size': 1,
+        'snapshot_id': None,
+        'name': 'Test volume name',
+        'description': 'Test volume desc name'
+        }
+
+
+class VsaVolumesTestCase(test.TestCase):
+
+    def setUp(self):
+        super(VsaVolumesTestCase, self).setUp()
+        self.stubs = stubout.StubOutForTesting()
+        self.vsa_api = vsa.API()
+        self.volume_api = volume.API()
+
+        self.context_non_admin = context.RequestContext(None, None)
+        self.context = context.get_admin_context()
+
+        def fake_show_by_name(meh, context, name):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+        self.stubs.Set(nova.image.fake._FakeImageService,
+                       'show_by_name',
+                       fake_show_by_name)
+
+        param = {'display_name': 'VSA name test'}
+        vsa_ref = self.vsa_api.create(self.context, **param)
+        self.vsa_id = vsa_ref['id']
+
+    def tearDown(self):
+        self.vsa_api.delete(self.context, self.vsa_id)
+        self.stubs.UnsetAll()
+        super(VsaVolumesTestCase, self).tearDown()
+
+    def test_vsa_volume_create_delete(self):
+        """ Check if volume is properly created and deleted. """
+        vols1 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        volume_param = _default_volume_param()
+        volume_param['from_vsa_id'] = self.vsa_id
+        volume_ref = self.volume_api.create(self.context, **volume_param)
+
+        self.assertEqual(volume_ref['display_name'],
+                         volume_param['name'])
+        self.assertEqual(volume_ref['display_description'],
+                         volume_param['description'])
+        self.assertEqual(volume_ref['size'],
+                         volume_param['size'])
+        self.assertEqual(volume_ref['status'],
+                         'available')
+
+        vols2 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        self.assertEqual(len(vols1) + 1, len(vols2))
+
+        self.volume_api.delete(self.context, volume_ref['id'])
+        vols3 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        self.assertEqual(len(vols3) + 1, len(vols2))
+
+    def test_vsa_volume_delete_nonavail_volume(self):
+        """ Check volume deletion in different states. """
+        volume_param = _default_volume_param()
+        volume_param['from_vsa_id'] = self.vsa_id
+        volume_ref = self.volume_api.create(self.context, **volume_param)
+
+        self.volume_api.update(self.context,
+                               volume_ref['id'], {'status': 'in-use'})
+        self.assertRaises(exception.ApiError,
+                          self.volume_api.delete,
+                          self.context, volume_ref['id'])
+
+        self.volume_api.update(self.context,
+                               volume_ref['id'], {'status': 'error'})
+        self.volume_api.delete(self.context, volume_ref['id'])
diff --git a/nova/volume/api.py b/nova/volume/api.py
index df55e9dc3..6b220cc54 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -80,6 +80,10 @@ class API(base.Base):
         volume = self.db.volume_create(context, options)
         if from_vsa_id is not None:
             # for FE VSA volumes do nothing
+            now = utils.utcnow()
+            volume = self.db.volume_update(context,
+                            volume['id'], {'status': 'available',
+                                           'launched_at': now})
             return volume
 
         rpc.cast(context,
@@ -100,14 +104,18 @@ class API(base.Base):
 
     def delete(self, context, volume_id):
         volume = self.get(context, volume_id)
-        if volume['status'] != "available":
-            raise exception.ApiError(_("Volume status must be available"))
 
         if volume['from_vsa_id'] is not None:
+            if volume['status'] == "in-use":
+                raise exception.ApiError(_("Volume is in use. "\
+                                           "Detach it first"))
             self.db.volume_destroy(context, volume['id'])
             LOG.debug(_("volume %d: deleted successfully"), volume['id'])
             return
 
+        if volume['status'] != "available":
+            raise exception.ApiError(_("Volume status must be available"))
+
         now = utils.utcnow()
         self.db.volume_update(context, volume_id, {'status': 'deleting',
                                                    'terminated_at': now})
--
cgit

From 336b2703ef90fcd7b422434434c9967880b97204 Mon Sep 17 00:00:00 2001
From: "vladimir.p"
Date: Tue, 26 Jul 2011 13:28:23 -0700
Subject: pep8 compliance

---
 nova/tests/test_vsa_volumes.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py
index 0facd3b1b..e1d4cd756 100644
--- a/nova/tests/test_vsa_volumes.py
+++ b/nova/tests/test_vsa_volumes.py
@@ -67,7 +67,7 @@ class VsaVolumesTestCase(test.TestCase):
 
     def test_vsa_volume_create_delete(self):
         """ Check if volume is properly created and deleted. """
-        vols1 = self.volume_api.get_all_by_vsa(self.context, 
+        vols1 = self.volume_api.get_all_by_vsa(self.context,
                                                self.vsa_id, "from")
         volume_param = _default_volume_param()
         volume_param['from_vsa_id'] = self.vsa_id
@@ -82,7 +82,7 @@ class VsaVolumesTestCase(test.TestCase):
         self.assertEqual(volume_ref['status'],
                          'available')
 
-        vols2 = self.volume_api.get_all_by_vsa(self.context, 
+        vols2 = self.volume_api.get_all_by_vsa(self.context,
                                                self.vsa_id, "from")
         self.assertEqual(len(vols1) + 1, len(vols2))
 
@@ -90,7 +90,7 @@ class VsaVolumesTestCase(test.TestCase):
         vols3 = self.volume_api.get_all_by_vsa(self.context,
                                                self.vsa_id, "from")
         self.assertEqual(len(vols3) + 1, len(vols2))
-    
+
     def test_vsa_volume_delete_nonavail_volume(self):
         """ Check volume deletion in different states. """
         volume_param = _default_volume_param()
--
cgit
From 401de172b86a13010885e70bc78351e72a7dfde3 Mon Sep 17 00:00:00 2001
From: "vladimir.p"
Date: Wed, 27 Jul 2011 22:49:16 -0700
Subject: prior to nova-1336 merge

---
 nova/scheduler/vsa.py                       |  77 ++--
 nova/tests/api/openstack/test_extensions.py |   7 +-
 nova/tests/scheduler/test_vsa_scheduler.py  | 616 ++++++++++++++++++++++++++++
 nova/tests/test_vsa.py                      |   2 +
 nova/tests/test_vsa_volumes.py              |  23 +-
 nova/volume/driver.py                       |   4 +-
 nova/vsa/api.py                             |  41 +-
 nova/vsa/manager.py                         |   2 +-
 8 files changed, 689 insertions(+), 83 deletions(-)
 create mode 100644 nova/tests/scheduler/test_vsa_scheduler.py

diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
index 6931afc2b..f66ce989c 100644
--- a/nova/scheduler/vsa.py
+++ b/nova/scheduler/vsa.py
@@ -65,40 +65,29 @@ class VsaScheduler(simple.SimpleScheduler):
                  {"method": "notification",
                   "args": {"event": event}})
 
-    def _compare_names(self, str1, str2):
-        result = str1.lower() == str2.lower()
-        # LOG.debug(_("Comparing %(str1)s and %(str2)s. "\
-        #             "Result %(result)s"), locals())
-        return result
-
-    def _compare_sizes_exact_match(self, cap_capacity, size_gb):
-        cap_capacity = BYTES_TO_GB(int(cap_capacity))
-        size_gb = int(size_gb)
-        result = cap_capacity == size_gb
-        # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
-        #             "Result %(result)s"), locals())
-        return result
-
-    def _compare_sizes_approxim(self, cap_capacity, size_gb):
-        cap_capacity = BYTES_TO_GB(int(cap_capacity))
-        size_gb = int(size_gb)
-        size_perc = size_gb * FLAGS.drive_type_approx_capacity_percent / 100
-
-        result = cap_capacity >= size_gb - size_perc and \
-                 cap_capacity <= size_gb + size_perc
-        # LOG.debug(_("Comparing %(cap_capacity)d and %(size_gb)d. "\
-        #             "Result %(result)s"), locals())
-        return result
-
     def _qosgrp_match(self, drive_type, qos_values):
+
+        def _compare_names(str1, str2):
+            result = str1.lower() == str2.lower()
+            return result
+
+        def _compare_sizes_approxim(cap_capacity, size_gb):
+            cap_capacity = BYTES_TO_GB(int(cap_capacity))
+            size_gb = int(size_gb)
+            size_perc = size_gb * \
+                        FLAGS.drive_type_approx_capacity_percent / 100
+
+            result = cap_capacity >= size_gb - size_perc and \
+                     cap_capacity <= size_gb + size_perc
+            return result
+
         # Add more entries for additional comparisons
         compare_list = [{'cap1': 'DriveType',
                          'cap2': 'type',
-                         'cmp_func': self._compare_names},
+                         'cmp_func': _compare_names},
                         {'cap1': 'DriveCapacity',
                          'cap2': 'size_gb',
-                         'cmp_func': self._compare_sizes_approxim}]
+                         'cmp_func': _compare_sizes_approxim}]
 
         for cap in compare_list:
             if cap['cap1'] in qos_values.keys() and \
@@ -106,20 +95,23 @@ class VsaScheduler(simple.SimpleScheduler):
                cap['cmp_func'] is not None and \
                cap['cmp_func'](qos_values[cap['cap1']],
                                drive_type[cap['cap2']]):
-                # LOG.debug(("One of required capabilities found: %s:%s"),
-                #             cap['cap1'], drive_type[cap['cap2']])
                 pass
             else:
                 return False
         return True
 
+    def _get_service_states(self):
+        return self.zone_manager.service_states
+
     def _filter_hosts(self, topic, request_spec, host_list=None):
+        LOG.debug(_("_filter_hosts: %(request_spec)s"), locals())
+
         drive_type = request_spec['drive_type']
         LOG.debug(_("Filter hosts for drive type %s"), drive_type['name'])
 
         if host_list is None:
-            host_list = self.zone_manager.service_states.iteritems()
+            host_list = self._get_service_states().iteritems()
 
         filtered_hosts = []     # returns list of (hostname, capability_dict)
         for host, host_dict in host_list:
@@ -131,7 +123,6 @@
             for qosgrp, qos_values in gos_info.iteritems():
                 if self._qosgrp_match(drive_type, qos_values):
                     if qos_values['AvailableCapacity'] > 0:
-                        # LOG.debug(_("Adding host %s to the list"), host)
                         filtered_hosts.append((host, gos_info))
                     else:
                         LOG.debug(_("Host %s has no free capacity. Skip"),
@@ -226,7 +217,7 @@
                  "args": {"volume_id": volume_ref['id'],
                           "snapshot_id": None}})
 
-    def _check_host_enforcement(self, availability_zone):
+    def _check_host_enforcement(self, context, availability_zone):
         if (availability_zone
                 and ':' in availability_zone
                 and context.is_admin):
@@ -273,16 +264,10 @@ class VsaScheduler(simple.SimpleScheduler):
             vol['capabilities'] = qos_cap
             self._consume_resource(qos_cap, vol['size'], -1)
-            # LOG.debug(_("Volume %(name)s assigned to host %(host)s"),
-            #             locals())
-
     def schedule_create_volumes(self, context, request_spec,
                                 availability_zone, *_args, **_kwargs):
         """Picks hosts for hosting multiple volumes."""
-        LOG.debug(_("Service states BEFORE %s"),
-                  self.zone_manager.service_states)
-
         num_volumes = request_spec.get('num_volumes')
         LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
                   locals())
 
         vsa_id = request_spec.get('vsa_id')
         volume_params = request_spec.get('volumes')
 
-        host = self._check_host_enforcement(availability_zone)
+        host = self._check_host_enforcement(context, availability_zone)
 
         try:
             self._assign_hosts_to_volumes(context, volume_params, host)
 
             for vol in volume_params:
                 self._provision_volume(context, vol, vsa_id,
                                        availability_zone)
-
-            LOG.debug(_("Service states AFTER %s"),
-                      self.zone_manager.service_states)
         except:
             if vsa_id:
                 db.vsa_update(context, vsa_id,
@@ -309,8 +291,6 @@ class VsaScheduler(simple.SimpleScheduler):
                 if 'capabilities' in vol:
                     self._consume_resource(vol['capabilities'],
                                            vol['size'], 1)
-                LOG.debug(_("Service states AFTER %s"),
-                          self.zone_manager.service_states)
             raise
 
         return None
@@ -319,7 +299,8 @@
         """Picks the best host based on requested drive type capability."""
         volume_ref = db.volume_get(context, volume_id)
 
-        host = self._check_host_enforcement(volume_ref['availability_zone'])
+        host = self._check_host_enforcement(context,
+                                            volume_ref['availability_zone'])
         if host:
             now = utils.utcnow()
             db.volume_update(context, volume_id, {'host': host,
@@ -333,9 +314,6 @@
                               volume_id, *_args, **_kwargs)
 
         drive_type = dict(drive_type)
-        LOG.debug(_("Service states BEFORE %s"),
-                  self.zone_manager.service_states)
-
         LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
                     "%(drive_type)s"), locals())
@@ -358,9 +336,6 @@
         db.volume_update(context, volume_id, {'host': host,
                                               'scheduled_at': now})
         self._consume_resource(qos_cap, volume_ref['size'], -1)
-
-        LOG.debug(_("Service states AFTER %s"),
-                  self.zone_manager.service_states)
         return host
 
     def _consume_full_drive(self, qos_values, direction):
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index d459c694f..2febe50e5 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -97,8 +97,9 @@ class ExtensionControllerTest(unittest.TestCase):
         data = json.loads(response.body)
         names = [x['name'] for x in data['extensions']]
         names.sort()
-        self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips",
-            "Fox In Socks", "Hosts", "Multinic", "Volumes"])
+        self.assertEqual(names, ["DriveTypes", "FlavorExtraSpecs",
+            "Floating_ips", "Fox In Socks", "Hosts", "Multinic", "VSAs",
+            "Volumes"])
 
         # Make sure that at least Fox in Sox is correct.
         (fox_ext,) = [
@@ -145,7 +146,7 @@ class ExtensionControllerTest(unittest.TestCase):
 
         # Make sure we have all the extensions.
         exts = root.findall('{0}extension'.format(NS))
-        self.assertEqual(len(exts), 6)
+        self.assertEqual(len(exts), 8)
 
         # Make sure that at least Fox in Sox is correct.
         (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX']
diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py
new file mode 100644
index 000000000..697ad3842
--- /dev/null
+++ b/nova/tests/scheduler/test_vsa_scheduler.py
@@ -0,0 +1,616 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+
+import nova
+from nova import exception
+from nova import flags
+from nova import db
+from nova import context
+from nova import test
+from nova import utils
+from nova import log as logging
+
+from nova.scheduler import vsa as vsa_sched
+from nova.scheduler import driver
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.scheduler.vsa')
+
+scheduled_volumes = []
+scheduled_volume = {}
+global_volume = {}
+
+
+class FakeVsaLeastUsedScheduler(
+        vsa_sched.VsaSchedulerLeastUsedHost):
+    # No need to stub anything at the moment
+    pass
+
+
+class FakeVsaMostAvailCapacityScheduler(
+        vsa_sched.VsaSchedulerMostAvailCapacity):
+    # No need to stub anything at the moment
+    pass
+
+
+class VsaSchedulerTestCase(test.TestCase):
+
+    def _get_vol_creation_request(self, num_vols, drive_ix, size=0):
+        volume_params = []
+        for i in range(num_vols):
+            drive_type = {'id': i,
+                          'name': 'name_' + str(drive_ix),
+                          'type': 'type_' + str(drive_ix),
+                          'size_gb': 1 + 100 * (drive_ix)}
+            volume = {'size': size,
+                      'snapshot_id': None,
+                      'name': 'vol_' + str(i),
+                      'description': None,
+                      'drive_ref': drive_type}
+            volume_params.append(volume)
+
+        return {'num_volumes': len(volume_params),
+                'vsa_id': 123,
+                'volumes': volume_params}
+
+    def _generate_default_service_states(self):
+        service_states = {}
+        for i in range(self.host_num):
+            host = {}
+            hostname = 'host_' + str(i)
+            if hostname in self.exclude_host_list:
+                continue
+
+            host['volume'] = {'timestamp': utils.utcnow(),
+                              'drive_qos_info': {}}
+
+            for j in range(self.drive_type_start_ix,
+                           self.drive_type_start_ix + self.drive_type_num):
+                dtype = {}
+                dtype['Name'] = 'name_' + str(j)
+                dtype['DriveType'] = 'type_' + str(j)
+                dtype['TotalDrives'] = 2 * (self.init_num_drives + i)
+                dtype['DriveCapacity'] = vsa_sched.GB_TO_BYTES(1 + 100 * j)
+                dtype['TotalCapacity'] = dtype['TotalDrives'] * \
+                                         dtype['DriveCapacity']
+                dtype['AvailableCapacity'] = (dtype['TotalDrives'] - i) * \
+                                             dtype['DriveCapacity']
+                dtype['DriveRpm'] = 7200
+                dtype['DifCapable'] = 0
+                dtype['SedCapable'] = 0
+                dtype['PartitionDrive'] = {
+                            'PartitionSize': 0,
+                            'NumOccupiedPartitions': 0,
+                            'NumFreePartitions': 0}
+                dtype['FullDrive'] = {
+                            'NumFreeDrives': dtype['TotalDrives'] - i,
+                            'NumOccupiedDrives': i}
+                host['volume']['drive_qos_info'][dtype['Name']] = dtype
+
+            service_states[hostname] = host
+
+        return service_states
+
+    def _print_service_states(self):
+        for host, host_val in self.service_states.iteritems():
+            LOG.info(_("Host %s"), host)
+            total_used = 0
+            total_available = 0
+            qos = host_val['volume']['drive_qos_info']
+
+            for k, d in qos.iteritems():
+                LOG.info("\t%s: type %s: drives (used %2d, total %2d) "\
+                         "size %3d, total %4d, used %4d, avail %d",
+                         k, d['DriveType'],
+                         d['FullDrive']['NumOccupiedDrives'],
+                         d['TotalDrives'],
+                         vsa_sched.BYTES_TO_GB(d['DriveCapacity']),
+                         vsa_sched.BYTES_TO_GB(d['TotalCapacity']),
+                         vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+                                               d['AvailableCapacity']),
+                         vsa_sched.BYTES_TO_GB(d['AvailableCapacity']))
+
+                total_used += vsa_sched.BYTES_TO_GB(d['TotalCapacity'] - \
+                                                    d['AvailableCapacity'])
+                total_available += vsa_sched.BYTES_TO_GB(
+                                                    d['AvailableCapacity'])
+            LOG.info("Host %s: used %d, avail %d",
+                     host, total_used, total_available)
+
+    def _set_service_states(self, host_num,
+                            drive_type_start_ix, drive_type_num,
+                            init_num_drives=10,
+                            exclude_host_list=[]):
+        self.host_num = host_num
+        self.drive_type_start_ix = drive_type_start_ix
+        self.drive_type_num = drive_type_num
+        self.exclude_host_list = exclude_host_list
+        self.init_num_drives = init_num_drives
+        self.service_states = self._generate_default_service_states()
+
+    def _get_service_states(self):
+        return self.service_states
+
+    def _fake_get_service_states(self):
+        return self._get_service_states()
+
+    def _fake_provision_volume(self, context, vol, vsa_id, availability_zone):
+        global scheduled_volumes
+        scheduled_volumes.append(dict(vol=vol,
+                                      vsa_id=vsa_id,
+                                      az=availability_zone))
+        name = vol['name']
+        host = vol['host']
+        LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
+                  locals())
+        LOG.debug(_("\t vol=%(vol)s"), locals())
+        pass
+
+    def _fake_vsa_update(self, context, vsa_id, values):
+        LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
+                    "values=%(values)s"), locals())
+        pass
+
+    def _fake_volume_create(self, context, options):
+        LOG.debug(_("Test: Volume create: %s"), options)
+        options['id'] = 123
+        global global_volume
+        global_volume = options
+        return options
+
+    def _fake_volume_get(self, context, volume_id):
+        LOG.debug(_("Test: Volume get request: id=%(volume_id)s"), locals())
+        global global_volume
+        global_volume['id'] = volume_id
+        global_volume['availability_zone'] = None
+        return global_volume
+
+    def _fake_volume_update(self, context, volume_id, values):
+        LOG.debug(_("Test: Volume update request: id=%(volume_id)s "\
+                    "values=%(values)s"), locals())
+        global scheduled_volume
+        scheduled_volume = {'id': volume_id, 'host': values['host']}
+        pass
+
+    def _fake_service_get_by_args(self, context, host, binary):
+        return "service"
+
+    def _fake_service_is_up_True(self, service):
+        return True
+
+    def _fake_service_is_up_False(self, service):
+        return False
+
+    def setUp(self, sched_class=None):
+        super(VsaSchedulerTestCase, self).setUp()
+        self.stubs = stubout.StubOutForTesting()
+        self.context_non_admin = context.RequestContext(None, None)
+        self.context = context.get_admin_context()
+
+        if sched_class is None:
+            self.sched = FakeVsaLeastUsedScheduler()
+        else:
+            self.sched = sched_class
+
+        self.host_num = 10
+        self.drive_type_num = 5
+
+        self.stubs.Set(self.sched,
+                       '_get_service_states', self._fake_get_service_states)
+        self.stubs.Set(self.sched,
+                       '_provision_volume', self._fake_provision_volume)
+        self.stubs.Set(nova.db, 'vsa_update', self._fake_vsa_update)
+
+        self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get)
+        self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+
+    def tearDown(self):
+        self.stubs.UnsetAll()
+        super(VsaSchedulerTestCase, self).tearDown()
+
+    def test_vsa_sched_create_volumes_simple(self):
+        global scheduled_volumes
+        scheduled_volumes = []
+        self._set_service_states(host_num=10,
+                                 drive_type_start_ix=0,
+                                 drive_type_num=5,
+                                 init_num_drives=10,
+                                 exclude_host_list=['host_1', 'host_3'])
+        prev = self._generate_default_service_states()
+        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2)
+
+        self.sched.schedule_create_volumes(self.context,
+                                           request_spec,
+                                           availability_zone=None)
+
+        self.assertEqual(len(scheduled_volumes), 3)
+        self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_0')
+        self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_2')
+        self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4')
+
+        cur = self._get_service_states()
+        for host in ['host_0', 'host_2', 'host_4']:
+            cur_dtype = cur[host]['volume']['drive_qos_info']['name_2']
+            prev_dtype = prev[host]['volume']['drive_qos_info']['name_2']
+            self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType'])
+            self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'],
+                             prev_dtype['FullDrive']['NumFreeDrives'] - 1)
+            self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'],
+                             prev_dtype['FullDrive']['NumOccupiedDrives'] + 1)
+
+    def test_vsa_sched_no_drive_type(self):
+        self._set_service_states(host_num=10,
+                                 drive_type_start_ix=0,
+                                 drive_type_num=5,
+                                 init_num_drives=1)
+        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=6)
+        self.assertRaises(driver.WillNotSchedule,
+                          self.sched.schedule_create_volumes,
+                          self.context,
+                          request_spec,
+                          availability_zone=None)
+
+    def test_vsa_sched_no_enough_drives(self):
+        global scheduled_volumes
+        scheduled_volumes = []
+
+        self._set_service_states(host_num=3,
+                                 drive_type_start_ix=0,
+                                 drive_type_num=1,
+                                 init_num_drives=0)
+        prev = self._generate_default_service_states()
+        request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=0)
+
+        self.assertRaises(driver.WillNotSchedule,
+                          self.sched.schedule_create_volumes,
+                          self.context,
+                          request_spec,
+                          availability_zone=None)
+
+        # check that everything was returned back
+        cur = self._get_service_states()
+        for k, v in prev.iteritems():
+            self.assertEqual(prev[k]['volume']['drive_qos_info'],
+                             cur[k]['volume']['drive_qos_info'])
+
+    def test_vsa_sched_wrong_topic(self):
+        self._set_service_states(host_num=1,
+                                 drive_type_start_ix=0,
+                                 drive_type_num=5,
+                                 init_num_drives=1)
+        states = self._get_service_states()
+        new_states = {}
+        new_states['host_0'] = {'compute': states['host_0']['volume']}
+        self.service_states = new_states
+        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+        self.assertRaises(driver.WillNotSchedule,
+                          self.sched.schedule_create_volumes,
+                          self.context,
+                          request_spec,
+                          availability_zone=None)
+
+    def test_vsa_sched_provision_volume(self):
+        global global_volume
+        global_volume = {}
+        self._set_service_states(host_num=1,
+                                 drive_type_start_ix=0,
+                                 drive_type_num=1,
+                                 init_num_drives=1)
+        request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0)
+
+        self.stubs.UnsetAll()
+        self.stubs.Set(self.sched,
+                       '_get_service_states', self._fake_get_service_states)
+        self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+
self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(request_spec['volumes'][0]['name'], + global_volume['display_name']) + + def test_vsa_sched_no_free_drives(self): + self._set_service_states(host_num=1, + drive_type_start_ix=0, + drive_type_num=1, + init_num_drives=1) + request_spec = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + cur = self._get_service_states() + cur_dtype = cur['host_0']['volume']['drive_qos_info']['name_0'] + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], 1) + + new_request = self._get_vol_creation_request(num_vols=1, drive_ix=0) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + self._print_service_states() + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + new_request, + availability_zone=None) + + def test_vsa_sched_forced_host(self): + global scheduled_volumes + scheduled_volumes = [] + + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10) + + request_spec = self._get_vol_creation_request(num_vols=3, drive_ix=2) + + self.assertRaises(exception.HostBinaryNotFound, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone="nova:host_5") + + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_False) + + self.assertRaises(driver.WillNotSchedule, + self.sched.schedule_create_volumes, + self.context, + request_spec, + availability_zone="nova:host_5") + + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_True) + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone="nova:host_5") + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_5') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_5') + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_1') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_4') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + self.assertEqual(cur_dtype['PartitionDrive'] + 
['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) + + def test_vsa_sched_create_single_volume_az(self): + global scheduled_volume + scheduled_volume = {} + + def _fake_volume_get_az(context, volume_id): + LOG.debug(_("Test: Volume get: id=%(volume_id)s"), locals()) + return {'id': volume_id, 'availability_zone': 'nova:host_3'} + + self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az) + self.stubs.Set(nova.db, + 'service_get_by_args', self._fake_service_get_by_args) + self.stubs.Set(self.sched, + 'service_is_up', self._fake_service_is_up_True) + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_3') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_3') + + def test_vsa_sched_create_single_non_vsa_volume(self): + global scheduled_volume + scheduled_volume = {} + + global global_volume + global_volume = {} + global_volume['drive_type'] = None + + self.assertRaises(driver.NoValidHost, + self.sched.schedule_create_volume, + self.context, + 123, + availability_zone=None) + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + drive_type = {'id': drive_ix, + 'name': 'name_' + str(drive_ix), + 'type': 'type_' + str(drive_ix), + 'size_gb': 1 + 100 * (drive_ix)} + + global_volume['drive_type'] = drive_type + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_2') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_2') + + +class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase): + + def setUp(self): + super(VsaSchedulerTestCaseMostAvail, self).setUp( + FakeVsaMostAvailCapacityScheduler()) + + def tearDown(self): + self.stubs.UnsetAll() + super(VsaSchedulerTestCaseMostAvail, self).tearDown() + + def test_vsa_sched_create_single_volume(self): + global scheduled_volume + scheduled_volume = {} + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_0', 'host_1']) + prev = self._generate_default_service_states() + + global global_volume + global_volume = {} + + drive_ix = 2 + drive_type = {'id': drive_ix, + 'name': 'name_' + str(drive_ix), + 'type': 'type_' + str(drive_ix), + 'size_gb': 1 + 100 * (drive_ix)} + + global_volume['drive_type'] = drive_type + global_volume['size'] = 0 + + host = self.sched.schedule_create_volume(self.context, + 123, availability_zone=None) + + self.assertEqual(host, 'host_9') + self.assertEqual(scheduled_volume['id'], 123) + self.assertEqual(scheduled_volume['host'], 'host_9') + + def test_vsa_sched_create_volumes_simple(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=10, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=10, + exclude_host_list=['host_1', 'host_3']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, 
drive_ix=2) + + self._print_service_states() + + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_9') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_8') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_7') + + cur = self._get_service_states() + for host in ['host_9', 'host_8', 'host_7']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_2'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_2'] + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + def test_vsa_sched_create_volumes_partition(self): + global scheduled_volumes + scheduled_volumes = [] + self._set_service_states(host_num=5, + drive_type_start_ix=0, + drive_type_num=5, + init_num_drives=1, + exclude_host_list=['host_0', 'host_2']) + prev = self._generate_default_service_states() + request_spec = self._get_vol_creation_request(num_vols=3, + drive_ix=3, + size=50) + self.sched.schedule_create_volumes(self.context, + request_spec, + availability_zone=None) + + self.assertEqual(len(scheduled_volumes), 3) + self.assertEqual(scheduled_volumes[0]['vol']['host'], 'host_4') + self.assertEqual(scheduled_volumes[1]['vol']['host'], 'host_3') + self.assertEqual(scheduled_volumes[2]['vol']['host'], 'host_1') + + cur = self._get_service_states() + for host in ['host_1', 'host_3', 'host_4']: + cur_dtype = cur[host]['volume']['drive_qos_info']['name_3'] + prev_dtype = prev[host]['volume']['drive_qos_info']['name_3'] + + self.assertEqual(cur_dtype['DriveType'], prev_dtype['DriveType']) + self.assertEqual(cur_dtype['FullDrive']['NumFreeDrives'], + prev_dtype['FullDrive']['NumFreeDrives'] - 1) + self.assertEqual(cur_dtype['FullDrive']['NumOccupiedDrives'], + prev_dtype['FullDrive']['NumOccupiedDrives'] + 1) + + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 0) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumOccupiedPartitions'], 1) + self.assertEqual(cur_dtype['PartitionDrive'] + ['NumFreePartitions'], 5) + self.assertEqual(prev_dtype['PartitionDrive'] + ['NumFreePartitions'], 0) + self.assertEqual(prev_dtype['PartitionDrive'] + ['PartitionSize'], 0) diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index 8e4d58960..cff23a800 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -22,6 +22,7 @@ from xml.etree.ElementTree import Element, SubElement from nova import exception from nova import flags from nova import vsa +from nova import volume from nova import db from nova import context from nova import test @@ -50,6 +51,7 @@ class VsaTestCase(test.TestCase): super(VsaTestCase, self).setUp() self.stubs = stubout.StubOutForTesting() self.vsa_api = vsa.API() + self.volume_api = volume.API() self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index e1d4cd756..d451a4377 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -61,7 +61,8 @@ class VsaVolumesTestCase(test.TestCase): self.vsa_id = vsa_ref['id'] def tearDown(self): - self.vsa_api.delete(self.context, self.vsa_id) + if self.vsa_id: + 
self.vsa_api.delete(self.context, self.vsa_id)
         self.stubs.UnsetAll()
         super(VsaVolumesTestCase, self).tearDown()
@@ -106,3 +107,23 @@ class VsaVolumesTestCase(test.TestCase):
         self.volume_api.update(self.context,
                                volume_ref['id'], {'status': 'error'})
         self.volume_api.delete(self.context, volume_ref['id'])
+
+    def test_vsa_volume_delete_vsa_with_volumes(self):
+        """ Check volume deletion in different states. """
+
+        vols1 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        for i in range(3):
+            volume_param = _default_volume_param()
+            volume_param['from_vsa_id'] = self.vsa_id
+            volume_ref = self.volume_api.create(self.context, **volume_param)
+
+        vols2 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        self.assertEqual(len(vols1) + 3, len(vols2))
+
+        self.vsa_api.delete(self.context, self.vsa_id)
+
+        vols3 = self.volume_api.get_all_by_vsa(self.context,
+                                               self.vsa_id, "from")
+        self.assertEqual(len(vols1), len(vols3))
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 2e3da57b2..98d115088 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -507,7 +507,7 @@ class ISCSIDriver(VolumeDriver):
         iscsi_properties = self._get_iscsi_properties(volume)
 
         if not iscsi_properties['target_discovered']:
-            self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
+            self._run_iscsiadm(iscsi_properties, '--op=new')
 
         if iscsi_properties.get('auth_method'):
             self._iscsiadm_update(iscsi_properties,
@@ -559,7 +559,7 @@ class ISCSIDriver(VolumeDriver):
         iscsi_properties = self._get_iscsi_properties(volume)
         self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
         self._run_iscsiadm(iscsi_properties, "--logout")
-        self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
+        self._run_iscsiadm(iscsi_properties, '--op=delete')
 
     def check_for_export(self, context, volume_id):
         """Make sure volume is exported."""
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index 9b2750d82..39f7d1431 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -312,9 +312,8 @@ class API(base.Base):
     def _force_volume_delete(self, ctxt, volume):
         """Delete a volume, bypassing the check that it must be available."""
         host = volume['host']
-
-        if not host:
-            # Volume not yet assigned to host
+        if not host or volume['from_vsa_id']:
+            # Volume not yet assigned to host OR FE volume
+            # Deleting volume from database and skipping rpc.
             self.db.volume_destroy(ctxt, volume['id'])
             return
@@ -324,41 +323,33 @@ class API(base.Base):
                  {"method": "delete_volume",
                   "args": {"volume_id": volume['id']}})
 
-    def delete_be_volumes(self, context, vsa_id, force_delete=True):
+    def delete_vsa_volumes(self, context, vsa_id, direction,
+                           force_delete=True):
+        if direction == "FE":
+            volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id)
+        else:
+            volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
 
-        be_volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id)
-        for volume in be_volumes:
+        for volume in volumes:
             try:
                 vol_name = volume['name']
-                LOG.info(_("VSA ID %(vsa_id)s: Deleting BE volume "\
-                         "%(vol_name)s"), locals())
+                LOG.info(_("VSA ID %(vsa_id)s: Deleting %(direction)s "\
+                         "volume %(vol_name)s"), locals())
                 self.volume_api.delete(context, volume['id'])
             except exception.ApiError:
                 LOG.info(_("Unable to delete volume %s"), volume['name'])
                 if force_delete:
-                    LOG.info(_("VSA ID %(vsa_id)s: Forced delete. BE volume "\
-                             "%(vol_name)s"), locals())
+                    LOG.info(_("VSA ID %(vsa_id)s: Forced delete. 
"\ + "%(direction)s volume %(vol_name)s"), locals()) self._force_volume_delete(context, volume) def delete(self, context, vsa_id): """Terminate a VSA instance.""" LOG.info(_("Going to try to terminate VSA ID %s"), vsa_id) - # allow deletion of volumes in "abnormal" state - - # Delete all FE volumes - fe_volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id) - for volume in fe_volumes: - try: - vol_name = volume['name'] - LOG.info(_("VSA ID %(vsa_id)s: Deleting FE volume "\ - "%(vol_name)s"), locals()) - self.volume_api.delete(context, volume['id']) - except exception.ApiError: - LOG.info(_("Unable to delete volume %s"), volume['name']) - - # Delete all BE volumes - self.delete_be_volumes(context, vsa_id, force_delete=True) + # Delete all FrontEnd and BackEnd volumes + self.delete_vsa_volumes(context, vsa_id, "FE", force_delete=True) + self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True) # Delete all VC instances instances = self.db.instance_get_all_by_vsa(context, vsa_id) diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 1390f8146..e963d26c5 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -145,7 +145,7 @@ class VsaManager(manager.SchedulerDependentManager): if has_failed_volumes: LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) - self.vsa_api.delete_be_volumes(context, vsa_id, force_delete=True) + self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True) self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED) return -- cgit From b4159d95c32382d124c3f3f0a49f8ad9f2d41036 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 28 Jul 2011 00:27:16 -0700 Subject: some minor cosmetic work. addressed some dead code section --- bin/nova-vsa | 3 --- nova/api/openstack/contrib/drive_types.py | 4 ---- nova/db/sqlalchemy/api.py | 2 -- .../sqlalchemy/migrate_repo/versions/036_add_vsa_data.py | 12 ------------ nova/flags.py | 2 -- nova/scheduler/vsa.py | 14 +++++--------- 6 files changed, 5 insertions(+), 32 deletions(-) diff --git a/bin/nova-vsa b/bin/nova-vsa index a67fe952d..07f998117 100755 --- a/bin/nova-vsa +++ b/bin/nova-vsa @@ -18,7 +18,6 @@ # under the License. 
"""Starter script for Nova VSA.""" -import gettext import os import sys @@ -30,8 +29,6 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) -gettext.install('nova', unicode=1) - from nova import flags from nova import log as logging from nova import service diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py index 6454fd81f..f2cbd3715 100644 --- a/nova/api/openstack/contrib/drive_types.py +++ b/nova/api/openstack/contrib/drive_types.py @@ -18,18 +18,14 @@ """ The Drive Types extension for Virtual Storage Arrays""" - from webob import exc from nova.vsa import drive_types from nova import exception -from nova import db -from nova import quota from nova import log as logging from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults -from nova.api.openstack import wsgi LOG = logging.getLogger("nova.api.drive_types") diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e17859f69..d71d8787b 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -3526,8 +3526,6 @@ def vsa_destroy(context, vsa_id): """ session = get_session() with session.begin(): - #vsa_ref = vsa_get(context, vsa_id, session=session) - #vsa_ref.delete(session=session) session.query(models.VirtualStorageArray).\ filter_by(id=vsa_id).\ update({'deleted': True, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py index 5d2e56a7e..3b39ff493 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/036_add_vsa_data.py @@ -96,18 +96,6 @@ drive_types = Table('drive_types', meta, Column('visible', Boolean(create_constraint=True, name=None)), ) -#vsa_disk_association = Table('vsa_disk_association', meta, -# Column('created_at', DateTime(timezone=False)), -# Column('updated_at', DateTime(timezone=False)), -# Column('deleted_at', DateTime(timezone=False)), -# Column('deleted', Boolean(create_constraint=True, name=None)), -# Column('id', Integer(), primary_key=True, nullable=False), -# Column('drive_type_id', Integer(), ForeignKey('drive_types.id')), -# Column('vsa_id', Integer(), ForeignKey('virtual_storage_arrays.id')), -# Column('disk_num', Integer(), nullable=False), -# ) - -#new_tables = (virtual_storage_arrays, drive_types, vsa_disk_association) new_tables = (virtual_storage_arrays, drive_types) # diff --git a/nova/flags.py b/nova/flags.py index c192b5281..7e9be5d84 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -365,8 +365,6 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', 'Manager for volume') DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') -DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', - 'Manager for vsa') DEFINE_string('vc_image_name', 'vc_image', 'the VC image ID (for a VC image that exists in DB Glance)') # VSA constants and enums diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index f66ce989c..ed5039f4d 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -34,8 +34,6 @@ from nova import log as logging LOG = logging.getLogger('nova.scheduler.vsa') FLAGS = flags.FLAGS -flags.DEFINE_integer('gb_to_bytes_shift', 30, - 'Conversion shift between GB and bytes') 
flags.DEFINE_integer('drive_type_approx_capacity_percent', 10, 'The percentage range for capacity comparison') flags.DEFINE_integer('vsa_unique_hosts_per_alloc', 10, @@ -45,11 +43,11 @@ flags.DEFINE_boolean('vsa_select_unique_drives', True, def BYTES_TO_GB(bytes): - return bytes >> FLAGS.gb_to_bytes_shift + return bytes >> 30 def GB_TO_BYTES(gb): - return gb << FLAGS.gb_to_bytes_shift + return gb << 30 class VsaScheduler(simple.SimpleScheduler): @@ -68,8 +66,7 @@ class VsaScheduler(simple.SimpleScheduler): def _qosgrp_match(self, drive_type, qos_values): def _compare_names(str1, str2): - result = str1.lower() == str2.lower() - return result + return str1.lower() == str2.lower() def _compare_sizes_approxim(cap_capacity, size_gb): cap_capacity = BYTES_TO_GB(int(cap_capacity)) @@ -77,9 +74,8 @@ class VsaScheduler(simple.SimpleScheduler): size_perc = size_gb * \ FLAGS.drive_type_approx_capacity_percent / 100 - result = cap_capacity >= size_gb - size_perc and \ - cap_capacity <= size_gb + size_perc - return result + return cap_capacity >= size_gb - size_perc and \ + cap_capacity <= size_gb + size_perc # Add more entries for additional comparisons compare_list = [{'cap1': 'DriveType', -- cgit From 16cbba0838e9a2ac712b91b103dc794b0edebd00 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 28 Jul 2011 00:45:16 -0700 Subject: more commented code removed --- nova/api/openstack/contrib/virtual_storage_arrays.py | 2 -- nova/scheduler/vsa.py | 2 -- nova/volume/manager.py | 3 --- 3 files changed, 7 deletions(-) diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 68a00fd7d..842573f8a 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -180,8 +180,6 @@ class VsaVolumeDriveController(volumes.VolumeController): ]}}} def __init__(self): - # self.compute_api = compute.API() - # self.vsa_api = vsa.API() self.volume_api = volume.API() super(VsaVolumeDriveController, self).__init__() diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index ed5039f4d..10c9b5a02 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -324,8 +324,6 @@ class VsaScheduler(simple.SimpleScheduler): db.vsa_update(context, volume_ref['to_vsa_id'], dict(status=VsaState.FAILED)) raise - #return super(VsaScheduler, self).schedule_create_volume(context, - # volume_id, *_args, **_kwargs) if host: now = utils.utcnow() diff --git a/nova/volume/manager.py b/nova/volume/manager.py index e46f8536d..fd1d5acfa 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -265,9 +265,6 @@ class VolumeManager(manager.SchedulerDependentManager): return error_list def _volume_stats_changed(self, stat1, stat2): - #LOG.info(_("stat1=%s"), stat1) - #LOG.info(_("stat2=%s"), stat2) - if len(stat1) != len(stat2): return True for (k, v) in stat1.iteritems(): -- cgit From f4359a7789ae96a36aaab8f53aa3234d13b1725a Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 28 Jul 2011 15:54:02 -0700 Subject: returned vsa_manager, nova-manage arg and print changes --- bin/nova-manage | 39 ++++++++++++++++++++------------------- nova/api/ec2/cloud.py | 4 ++++ nova/flags.py | 2 ++ nova/vsa/api.py | 2 -- 4 files changed, 26 insertions(+), 21 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 78b88e9ba..19793197c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1018,15 +1018,15 @@ class VsaCommands(object): project=project) def _list(self, vsas): - format_str = "%-5s %-15s 
%-25s %-30s %-5s %-10s %-10s %-10s %10s" + format_str = "%-5s %-15s %-25s %-10s %-6s %-9s %-10s %-10s %10s" if len(vsas): print format_str %\ (_('ID'), _('vsa_id'), _('displayName'), - _('description'), - _('count'), _('vc_type'), + _('vc_cnt'), + _('drive_cnt'), _('status'), _('AZ'), _('createTime')) @@ -1036,9 +1036,9 @@ class VsaCommands(object): (vsa['vsaId'], vsa['name'], vsa['displayName'], - vsa['displayDescription'], - vsa['vcCount'], vsa['vcType'], + vsa['vcCount'], + vsa['volCount'], vsa['status'], vsa['availabilityZone'], str(vsa['createTime'])) @@ -1053,7 +1053,8 @@ class VsaCommands(object): @args('--instance_type', dest='instance_type_name', metavar="", help='Instance type name') @args('--image', dest='image_name', metavar="", help='Image name') - @args('--shared', dest='shared', metavar="", help='Use shared drives') + @args('--shared', dest='shared', action="store_true", default=False, + help='Use shared drives') @args('--az', dest='az', metavar="", help='Availability zone') def create(self, storage='[]', name=None, description=None, vc_count=1, instance_type_name=None, image_name=None, shared=None, @@ -1079,9 +1080,9 @@ class VsaCommands(object): if instance_type_name == '': instance_type_name = None - if shared is None or shared == "--full_drives": + if shared in [None, False, "--full_drives"]: shared = False - elif shared == "--shared": + elif shared in [True, "--shared"]: shared = True else: raise ValueError(_('Shared parameter should be set either to "\ @@ -1181,7 +1182,7 @@ class VsaDriveTypeCommands(object): visible=None, name=None): """Create drive type.""" - if visible is None or visible in ["--show", "show"]: + if visible in [None, "--show", "show"]: visible = True elif visible in ["--hide", "hide"]: visible = False @@ -1223,7 +1224,7 @@ class VsaDriveTypeCommands(object): def list(self, visible=None, name=None): """Describe all available VSA drive types (or particular one).""" - visible = False if visible == "--all" or visible == False else True + visible = False if visible in ["--all", False] else True if name is not None: name = [name] @@ -1245,21 +1246,21 @@ class VsaDriveTypeCommands(object): capabilities='', visible=None): """Update drive type.""" - if visible is None or visible in ["--show", "show"]: - visible = True - elif visible in ["--hide", "hide"]: - visible = False - else: - raise ValueError(_('Visible parameter should be set to --show '\ - 'or --hide')) - values = { 'type': type, 'size_gb': size_gb, 'rpm': rpm, 'capabilities': capabilities, - 'visible': visible } + if visible: + if visible in ["--show", "show"]: + values['visible'] = True + elif visible in ["--hide", "hide"]: + values['visible'] = False + else: + raise ValueError(_("Visible parameter should be set to "\ + "--show or --hide")) + self.controller.update_drive_type(context.get_admin_context(), name, **values) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ca1fef51f..0a0644351 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -965,6 +965,10 @@ class CloudController(object): vsa['vcType'] = p_vsa['vsa_instance_type'].get('name', None) else: vsa['vcType'] = None + + vols = self.volume_api.get_all_by_vsa(context, p_vsa['id'], "to") + vsa['volCount'] = 0 if vols is None else len(vols) + return vsa def create_vsa(self, context, **kwargs): diff --git a/nova/flags.py b/nova/flags.py index 7e9be5d84..c192b5281 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -365,6 +365,8 @@ DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', 'Manager for 
volume') DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') +DEFINE_string('vsa_manager', 'nova.vsa.manager.VsaManager', + 'Manager for vsa') DEFINE_string('vc_image_name', 'vc_image', 'the VC image ID (for a VC image that exists in DB Glance)') # VSA constants and enums diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 39f7d1431..0baba6180 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -205,12 +205,10 @@ class API(base.Base): # create volumes if FLAGS.vsa_multi_vol_creation: if len(volume_params) > 0: - #filter_class = 'nova.scheduler.vsa.InstanceTypeFilter' request_spec = { 'num_volumes': len(volume_params), 'vsa_id': vsa_id, 'volumes': volume_params, - #'filter': filter_class, } rpc.cast(context, -- cgit From bd39829cc1908cb5ead899c9659a5c516b073a4f Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 9 Aug 2011 16:55:51 -0700 Subject: merge with nova-1411. fixed --- nova/api/ec2/cloud.py | 2 +- nova/api/openstack/contrib/floating_ips.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9094f6b56..ac0ff713b 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -1149,7 +1149,7 @@ class CloudController(object): return {'driveTypeSet': [dict(drive) for drive in drives]} @staticmethod - def _convert_to_set(self, lst, label): + def _convert_to_set(lst, label): if lst is None or lst == []: return None if not isinstance(lst, list): diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 52c9c6cf9..2aba1068a 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -102,7 +102,7 @@ class FloatingIPController(object): def delete(self, req, id): context = req.environ['nova.context'] ip = self.network_api.get_floating_ip(context, id) - + if 'fixed_ip' in ip: try: self.disassociate(req, id, '') -- cgit From 820d28dcf09088b5878d4cd5dcb5f4765e0b4992 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Tue, 9 Aug 2011 18:14:41 -0700 Subject: Dropped vsa_id from instances --- nova/compute/api.py | 8 +++--- nova/db/api.py | 12 --------- nova/db/sqlalchemy/api.py | 30 +--------------------- .../migrate_repo/versions/037_add_vsa_data.py | 7 ----- nova/db/sqlalchemy/models.py | 3 --- nova/vsa/manager.py | 3 ++- 6 files changed, 6 insertions(+), 57 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 42e627712..4ac0ffef2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -151,7 +151,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, vsa_id=None): + reservation_id=None): """Verify all the input parameters regardless of the provisioning strategy being performed.""" @@ -247,7 +247,6 @@ class API(base.Base): 'os_type': os_type, 'architecture': architecture, 'vm_mode': vm_mode, - 'vsa_id': vsa_id, 'root_device_name': root_device_name} return (num_instances, base_options, image) @@ -469,8 +468,7 @@ class API(base.Base): key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, injected_files=None, admin_password=None, zone_blob=None, - reservation_id=None, block_device_mapping=None, - vsa_id=None): + reservation_id=None, block_device_mapping=None): """ Provision the instances by sending off a series of single instance requests 
to the Schedulers. This is fine for trival @@ -491,7 +489,7 @@ class API(base.Base): key_name, key_data, security_group, availability_zone, user_data, metadata, injected_files, admin_password, zone_blob, - reservation_id, vsa_id) + reservation_id) block_device_mapping = block_device_mapping or [] instances = [] diff --git a/nova/db/api.py b/nova/db/api.py index 59baf94dd..0b6995f90 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -512,23 +512,11 @@ def instance_get_all_by_project(context, project_id): return IMPL.instance_get_all_by_project(context, project_id) -def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): - """Get all instance spawned by a given VSA belonging to a project.""" - return IMPL.instance_get_all_by_project_and_vsa(context, - project_id, - vsa_id) - - def instance_get_all_by_host(context, host): """Get all instance belonging to a host.""" return IMPL.instance_get_all_by_host(context, host) -def instance_get_all_by_vsa(context, vsa_id): - """Get all instance belonging to a VSA.""" - return IMPL.instance_get_all_by_vsa(context, vsa_id) - - def instance_get_all_by_reservation(context, reservation_id): """Get all instances belonging to a reservation.""" return IMPL.instance_get_all_by_reservation(context, reservation_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ff6d756a1..bc1a3046c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1321,35 +1321,6 @@ def instance_get_all_by_project(context, project_id): all() -@require_context -def instance_get_all_by_project_and_vsa(context, project_id, vsa_id): - authorize_project_context(context, project_id) - - session = get_session() - return session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter_by(project_id=project_id).\ - filter_by(vsa_id=vsa_id).\ - filter_by(deleted=can_read_deleted(context)).\ - all() - - -@require_admin_context -def instance_get_all_by_vsa(context, vsa_id): - session = get_session() - return session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter_by(vsa_id=vsa_id).\ - filter_by(deleted=can_read_deleted(context)).\ - all() - - @require_context def instance_get_all_by_reservation(context, reservation_id): session = get_session() @@ -3748,6 +3719,7 @@ def vsa_get_vc_ips_list(context, vsa_id): """ result = [] session = get_session() + """ VP-TODO: CHANGE THIS!!! Need to perform a search based on meta-data """ vc_instances = session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ options(joinedload('security_groups')).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py index 3b39ff493..5a80f4e7a 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py @@ -27,15 +27,10 @@ meta = MetaData() # actual definitions of tables . 
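# The Table() stubs in this migration declare only the primary key on
# purpose: sqlalchemy-migrate just needs a handle on an existing table to
# attach or drop columns. A condensed sketch of the same pattern used here:
#
#     volumes = Table('volumes', meta,
#                     Column('id', Integer(), primary_key=True,
#                            nullable=False))
#     to_vsa_id = Column('to_vsa_id', Integer(), nullable=True)
#     volumes.create_column(to_vsa_id)    # upgrade
#     volumes.drop_column('to_vsa_id')    # downgrade
#
# The full column definitions live with the models, not in the migration.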
# -instances = Table('instances', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - volumes = Table('volumes', meta, Column('id', Integer(), primary_key=True, nullable=False), ) -vsa_id = Column('vsa_id', Integer(), nullable=True) to_vsa_id = Column('to_vsa_id', Integer(), nullable=True) from_vsa_id = Column('from_vsa_id', Integer(), nullable=True) drive_type_id = Column('drive_type_id', Integer(), nullable=True) @@ -123,7 +118,6 @@ def upgrade(migrate_engine): logging.exception('Exception while creating table') raise - instances.create_column(vsa_id) volumes.create_column(to_vsa_id) volumes.create_column(from_vsa_id) volumes.create_column(drive_type_id) @@ -132,7 +126,6 @@ def upgrade(migrate_engine): def downgrade(migrate_engine): meta.bind = migrate_engine - instances.drop_column(vsa_id) volumes.drop_column(to_vsa_id) volumes.drop_column(from_vsa_id) volumes.drop_column(drive_type_id) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f80029e97..236f148e4 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -243,9 +243,6 @@ class Instance(BASE, NovaBase): # assert(state in ['nostate', 'running', 'blocked', 'paused', # 'shutdown', 'shutoff', 'crashed']) - vsa_id = Column(Integer, ForeignKey('virtual_storage_arrays.id'), - nullable=True) - class VirtualStorageArray(BASE, NovaBase): """ diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 0da6fe460..1d17340f2 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -173,6 +173,7 @@ class VsaManager(manager.SchedulerDependentManager): display_description='VC for VSA ' + vsa['display_name'], availability_zone=vsa['availability_zone'], user_data=storage_data, - vsa_id=vsa_id) + vsa_id=vsa_id, + metadata=dict(vsa_id=str(vsa_id))) self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED) -- cgit From 57b8f976f18b1f45de16ef8e87a6e215c009d228 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 11 Aug 2011 12:04:03 -0700 Subject: moved vsa_id to metadata. 
Added search my meta --- nova/db/sqlalchemy/api.py | 33 ++++++++++++++++-------- nova/tests/test_compute.py | 63 ++++++++++++++++++++++++++++++++++++++++++++++ nova/vsa/api.py | 3 ++- nova/vsa/manager.py | 1 - 4 files changed, 87 insertions(+), 13 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bc1a3046c..b77f11abb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1175,6 +1175,19 @@ def instance_get_all_by_filters(context, filters): return True return False + def _regexp_filter_by_metadata(instance, meta): + inst_metadata = [{node['key']: node['value']} \ + for node in instance['metadata']] + if isinstance(meta, list): + for node in meta: + if node not in inst_metadata: + return False + elif isinstance(meta, dict): + for k, v in meta.iteritems(): + if {k: v} not in inst_metadata: + return False + return True + def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) @@ -1232,7 +1245,9 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.all() + instances = query_prefix.\ + filter_by(deleted=can_read_deleted(context)).\ + all() if not instances: return [] @@ -1248,6 +1263,9 @@ def instance_get_all_by_filters(context, filters): filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) + elif filter_name == 'metadata': + filter_l = lambda instance: _regexp_filter_by_metadata(instance, + filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) @@ -3718,16 +3736,9 @@ def vsa_get_vc_ips_list(context, vsa_id): Retrieves IPs of instances associated with Virtual Storage Array. """ result = [] - session = get_session() - """ VP-TODO: CHANGE THIS!!! 
Need to perform a search based on meta-data """ - vc_instances = session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter_by(vsa_id=vsa_id).\ - filter_by(deleted=False).\ - all() + + vc_instances = instance_get_all_by_filters(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) for vc_instance in vc_instances: if vc_instance['fixed_ips']: for fixed in vc_instance['fixed_ips']: diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 80f7ff489..661acc980 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1320,6 +1320,69 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(c, instance_id2) db.instance_destroy(c, instance_id3) + def test_get_all_by_metadata(self): + """Test searching instances by metadata""" + + c = context.get_admin_context() + instance_id0 = self._create_instance() + instance_id1 = self._create_instance({ + 'metadata': {'key1': 'value1'}}) + instance_id2 = self._create_instance({ + 'metadata': {'key2': 'value2'}}) + instance_id3 = self._create_instance({ + 'metadata': {'key3': 'value3'}}) + instance_id4 = self._create_instance({ + 'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + + # get all instances + instances = self.compute_api.get_all(c, + search_opts={'metadata': {}}) + self.assertEqual(len(instances), 5) + + # wrong key/value combination + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key1': 'value3'}}) + self.assertEqual(len(instances), 0) + + # non-existing keys + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key5': 'value1'}}) + self.assertEqual(len(instances), 0) + + # find existing instance + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key2': 'value2'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3'}}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id3 in instance_ids) + self.assertTrue(instance_id4 in instance_ids) + + # multiple criterias as a dict + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + # multiple criterias as a list + instances = self.compute_api.get_all(c, + search_opts={'metadata': [{'key4': 'value4'}, + {'key3': 'value3'}]}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + db.instance_destroy(c, instance_id0) + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + db.instance_destroy(c, instance_id4) + @staticmethod def _parse_db_block_device_mapping(bdm_ref): attr_list = ('delete_on_termination', 'device_name', 'no_device', diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 00ab96162..3588e58cc 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -355,7 +355,8 @@ class API(base.Base): self.delete_vsa_volumes(context, vsa_id, "BE", force_delete=True) # Delete all VC instances - instances = self.db.instance_get_all_by_vsa(context, vsa_id) + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) for instance in instances: name = 
instance['name'] LOG.debug(_("VSA ID %(vsa_id)s: Delete instance %(name)s"), diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 1d17340f2..d98d0fcb2 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -173,7 +173,6 @@ class VsaManager(manager.SchedulerDependentManager): display_description='VC for VSA ' + vsa['display_name'], availability_zone=vsa['availability_zone'], user_data=storage_data, - vsa_id=vsa_id, metadata=dict(vsa_id=str(vsa_id))) self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED) -- cgit From fe8b1023bc9b800f628c0e35b29c165863b17206 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 11 Aug 2011 13:45:55 -0700 Subject: capabilities fix, run_as_root fix --- nova/scheduler/manager.py | 4 ++-- nova/scheduler/zone_manager.py | 2 -- nova/volume/driver.py | 15 ++++++++++----- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index c8b16b622..294de62e4 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -71,8 +71,8 @@ class SchedulerManager(manager.Manager): def update_service_capabilities(self, context=None, service_name=None, host=None, capabilities=None): """Process a capability update from a service node.""" - if not capability: - capability = {} + if not capabilities: + capabilities = {} self.zone_manager.update_service_capabilities(service_name, host, capabilities) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 71889e99f..9d05ea42e 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -197,8 +197,6 @@ class ZoneManager(object): def update_service_capabilities(self, service_name, host, capabilities): """Update the per-service capabilities based on this notification.""" - # logging.debug(_("Received %(service_name)s service update from " - # "%(host)s: %(capabilities)s") % locals()) logging.debug(_("Received %(service_name)s service update from " "%(host)s.") % locals()) service_caps = self.service_states.get(host, {}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index f54f3b5aa..a1d7f700e 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -843,11 +843,12 @@ class ZadaraBEDriver(ISCSIDriver): qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'create_qospart', '--qos', qosstr, '--pname', volume['name'], '--psize', sizestr, + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("VSA BE create_volume for %s failed"), volume['name']) @@ -861,9 +862,10 @@ class ZadaraBEDriver(ISCSIDriver): return super(ZadaraBEDriver, self).delete_volume(volume) try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'delete_partition', '--pname', volume['name'], + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("VSA BE delete_volume for %s failed"), volume['name']) @@ -925,10 +927,11 @@ class ZadaraBEDriver(ISCSIDriver): return try: - self._sync_exec('sudo', '/var/lib/zadara/bin/zadara_sncfg', + self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'remove_export', '--pname', volume['name'], '--tid', iscsi_target, + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("VSA BE remove_export for %s failed"), volume['name']) @@ -954,11 +957,12 @@ class ZadaraBEDriver(ISCSIDriver): 
Common logic that asks zadara_sncfg to setup iSCSI target/lun for this volume """ - (out, err) = self._sync_exec('sudo', + (out, err) = self._sync_exec( '/var/lib/zadara/bin/zadara_sncfg', 'create_export', '--pname', volume['name'], '--tid', iscsi_target, + run_as_root=True, check_exit_code=0) result_xml = ElementTree.fromstring(out) @@ -980,9 +984,10 @@ class ZadaraBEDriver(ISCSIDriver): def _get_qosgroup_summary(self): """gets the list of qosgroups from Zadara BE""" try: - (out, err) = self._sync_exec('sudo', + (out, err) = self._sync_exec( '/var/lib/zadara/bin/zadara_sncfg', 'get_qosgroups_xml', + run_as_root=True, check_exit_code=0) except exception.ProcessExecutionError: LOG.debug(_("Failed to retrieve QoS info")) -- cgit From b66ea57ae10bac1656e11663e273837dfae67814 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 12 Aug 2011 12:51:54 -0700 Subject: removed VSA/drive_type code from EC2 cloud. changed nova-manage not to use cloud APIs --- bin/nova-manage | 87 ++++++++++++++------------- nova/api/ec2/__init__.py | 4 -- nova/api/ec2/cloud.py | 153 ----------------------------------------------- nova/vsa/api.py | 2 +- nova/vsa/drive_types.py | 19 +++++- 5 files changed, 65 insertions(+), 200 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index a1732cb97..3b0bf47e2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -96,6 +96,8 @@ from nova.auth import manager from nova.cloudpipe import pipelib from nova.compute import instance_types from nova.db import migration +from nova import vsa +from nova.vsa import drive_types FLAGS = flags.FLAGS flags.DECLARE('fixed_range', 'nova.network.manager') @@ -1028,9 +1030,8 @@ class VsaCommands(object): """Methods for dealing with VSAs""" def __init__(self, *args, **kwargs): - self.controller = cloud.CloudController() self.manager = manager.AuthManager() - + self.vsa_api = vsa.API() self.context = context.get_admin_context() def _list(self, vsas): @@ -1049,15 +1050,15 @@ class VsaCommands(object): for vsa in vsas: print format_str %\ - (vsa['vsaId'], + (vsa['id'], vsa['name'], - vsa['displayName'], - vsa['vcType'], - vsa['vcCount'], - vsa['volCount'], + vsa['display_name'], + vsa['vsa_instance_type'].get('name', None), + vsa['vc_count'], + vsa['vol_count'], vsa['status'], - vsa['availabilityZone'], - str(vsa['createTime'])) + vsa['availability_zone'], + str(vsa['created_at'])) @args('--storage', dest='storage', metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]", @@ -1124,6 +1125,9 @@ class VsaCommands(object): if instance_type_name == '': instance_type_name = None + if image_name == '': + image_name = None + if shared in [None, False, "--full_drives"]: shared = False elif shared in [True, "--shared"]: @@ -1136,15 +1140,15 @@ class VsaCommands(object): 'display_name': name, 'display_description': description, 'vc_count': int(vc_count), - 'vc_type': instance_type_name, + 'instance_type': instance_type_name, 'image_name': image_name, + 'availability_zone': az, 'storage': storage_list, 'shared': shared, - 'placement': {'AvailabilityZone': az} } - result = self.controller.create_vsa(ctxt, **values) - self._list(result['vsaSet']) + result = self.vsa_api.create(ctxt, **values) + self._list([result]) @args('--id', dest='vsa_id', metavar="", help='VSA ID') @args('--name', dest='name', metavar="", help='VSA name') @@ -1162,32 +1166,38 @@ class VsaCommands(object): if vc_count is not None: values['vc_count'] = int(vc_count) - self.controller.update_vsa(self.context, vsa_id, **values) + vsa_id = ec2utils.ec2_id_to_id(vsa_id) + 
result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values) + self._list([result]) @args('--id', dest='vsa_id', metavar="", help='VSA ID') def delete(self, vsa_id): """Delete a VSA.""" - self.controller.delete_vsa(self.context, vsa_id) + vsa_id = ec2utils.ec2_id_to_id(vsa_id) + self.vsa_api.delete(self.context, vsa_id) @args('--id', dest='vsa_id', metavar="", help='VSA ID (optional)') def list(self, vsa_id=None): """Describe all available VSAs (or particular one).""" + vsas = [] if vsa_id is not None: - vsa_id = [vsa_id] + internal_id = ec2utils.ec2_id_to_id(vsa_id) + vsa = self.vsa_api.get(self.context, internal_id) + vsas.append(vsa) + else: + vsas = self.vsa_api.get_all(self.context) - result = self.controller.describe_vsas(self.context, vsa_id) - self._list(result['vsaSet']) + self._list(vsas) class VsaDriveTypeCommands(object): """Methods for dealing with VSA drive types""" def __init__(self, *args, **kwargs): - self.controller = cloud.CloudController() - self.manager = manager.AuthManager() super(VsaDriveTypeCommands, self).__init__(*args, **kwargs) + self.context = context.get_admin_context() def _list(self, drives): format_str = "%-5s %-30s %-10s %-10s %-10s %-20s %-10s %s" @@ -1234,23 +1244,17 @@ class VsaDriveTypeCommands(object): raise ValueError(_('Visible parameter should be set to --show '\ 'or --hide')) - values = { - 'type': type, - 'size_gb': int(size_gb), - 'rpm': rpm, - 'capabilities': capabilities, - 'visible': visible, - 'name': name - } - result = self.controller.create_drive_type(context.get_admin_context(), - **values) - self._list(result['driveTypeSet']) + result = drive_types.create(self.context, + type, int(size_gb), rpm, + capabilities, visible, name) + self._list([result]) @args('--name', dest='name', metavar="", help='Drive name') def delete(self, name): """Delete drive type.""" - self.controller.delete_drive_type(context.get_admin_context(), name) + dtype = drive_types.get_by_name(self.context, name) + drive_types.delete(self.context, dtype['id']) @args('--name', dest='name', metavar="", help='Drive name') @args('--new_name', dest='new_name', metavar="", @@ -1258,8 +1262,9 @@ class VsaDriveTypeCommands(object): def rename(self, name, new_name=None): """Rename drive type.""" - self.controller.rename_drive_type(context.get_admin_context(), - name, new_name) + dtype = drive_types.rename(self.context, + name, new_name) + self._list([dtype]) @args('--all', dest='visible', action="store_false", help='Show all drives') @@ -1271,11 +1276,12 @@ class VsaDriveTypeCommands(object): visible = False if visible in ["--all", False] else True if name is not None: - name = [name] + drive = drive_types.get_by_name(self.context, name) + drives = [drive] + else: + drives = drive_types.get_all(self.context, visible) - result = self.controller.describe_drive_types( - context.get_admin_context(), name, visible) - self._list(result['driveTypeSet']) + self._list(drives) @args('--name', dest='name', metavar="", help='Drive name') @args('--type', dest='type', metavar="", @@ -1305,8 +1311,9 @@ class VsaDriveTypeCommands(object): raise ValueError(_("Visible parameter should be set to "\ "--show or --hide")) - self.controller.update_drive_type(context.get_admin_context(), - name, **values) + dtype = drive_types.get_by_name(self.context, name) + dtype = drive_types.update(self.context, dtype['id'], **values) + self._list([dtype]) class VolumeCommands(object): diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 35c809547..8b6e47cfb 100644 --- 
a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -268,10 +268,6 @@ class Authorizer(wsgi.Middleware): 'StartInstances': ['projectmanager', 'sysadmin'], 'StopInstances': ['projectmanager', 'sysadmin'], 'DeleteVolume': ['projectmanager', 'sysadmin'], - 'CreateVsa': ['projectmanager', 'sysadmin'], - 'DeleteVsa': ['projectmanager', 'sysadmin'], - 'DescribeVsas': ['projectmanager', 'sysadmin'], - 'DescribeDriveTypes': ['projectmanager', 'sysadmin'], 'DescribeImages': ['all'], 'DeregisterImage': ['projectmanager', 'sysadmin'], 'RegisterImage': ['projectmanager', 'sysadmin'], diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ac0ff713b..87bba58c3 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -45,8 +45,6 @@ from nova import network from nova import rpc from nova import utils from nova import volume -from nova import vsa -from nova.vsa import drive_types from nova.api.ec2 import ec2utils from nova.compute import instance_types from nova.image import s3 @@ -184,7 +182,6 @@ class CloudController(object): self.compute_api = compute.API( network_api=self.network_api, volume_api=self.volume_api) - self.vsa_api = vsa.API(compute_api=self.compute_api) self.setup() def __str__(self): @@ -998,156 +995,6 @@ class CloudController(object): 'status': volume['attach_status'], 'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)} - def _format_vsa(self, context, p_vsa): - vsa = {} - vsa['vsaId'] = p_vsa['id'] - vsa['status'] = p_vsa['status'] - vsa['availabilityZone'] = p_vsa['availability_zone'] - vsa['createTime'] = p_vsa['created_at'] - vsa['name'] = p_vsa['name'] - vsa['displayName'] = p_vsa['display_name'] - vsa['displayDescription'] = p_vsa['display_description'] - vsa['vcCount'] = p_vsa['vc_count'] - if p_vsa['vsa_instance_type']: - vsa['vcType'] = p_vsa['vsa_instance_type'].get('name', None) - else: - vsa['vcType'] = None - - vols = self.volume_api.get_all_by_vsa(context, p_vsa['id'], "to") - vsa['volCount'] = 0 if vols is None else len(vols) - - return vsa - - def create_vsa(self, context, **kwargs): - display_name = kwargs.get('display_name') - display_description = kwargs.get('display_description') - vc_count = int(kwargs.get('vc_count', 1)) - instance_type = instance_types.get_instance_type_by_name( - kwargs.get('vc_type', FLAGS.default_vsa_instance_type)) - image_name = kwargs.get('image_name') - availability_zone = kwargs.get('placement', {}).get( - 'AvailabilityZone') - storage = kwargs.get('storage', []) - shared = kwargs.get('shared', False) - - vc_type = instance_type['name'] - _storage = str(storage) - LOG.audit(_("Create VSA %(display_name)s vc_count:%(vc_count)d "\ - "vc_type:%(vc_type)s storage:%(_storage)s"), locals()) - - vsa = self.vsa_api.create(context, display_name, display_description, - vc_count, instance_type, image_name, - availability_zone, storage, shared) - return {'vsaSet': [self._format_vsa(context, vsa)]} - - def update_vsa(self, context, vsa_id, **kwargs): - LOG.audit(_("Update VSA %s"), vsa_id) - updatable_fields = ['display_name', 'display_description', 'vc_count'] - changes = {} - for field in updatable_fields: - if field in kwargs: - changes[field] = kwargs[field] - if changes: - vsa_id = ec2utils.ec2_id_to_id(vsa_id) - self.vsa_api.update(context, vsa_id=vsa_id, **changes) - return True - - def delete_vsa(self, context, vsa_id, **kwargs): - LOG.audit(_("Delete VSA %s"), vsa_id) - vsa_id = ec2utils.ec2_id_to_id(vsa_id) - - self.vsa_api.delete(context, vsa_id) - - return True - - def describe_vsas(self, context, vsa_id=None, 
status=None,
-                      availability_zone=None, **kwargs):
-        LOG.audit(_("Describe VSAs"))
-        result = []
-        vsas = []
-        if vsa_id is not None:
-            for ec2_id in vsa_id:
-                internal_id = ec2utils.ec2_id_to_id(ec2_id)
-                vsa = self.vsa_api.get(context, internal_id)
-                vsas.append(vsa)
-        else:
-            vsas = self.vsa_api.get_all(context)
-
-        if status:
-            result = []
-            for vsa in vsas:
-                if vsa['status'] in status:
-                    result.append(vsa)
-            vsas = result
-
-        if availability_zone:
-            result = []
-            for vsa in vsas:
-                if vsa['availability_zone'] in availability_zone:
-                    result.append(vsa)
-            vsas = result
-
-        return {'vsaSet': [self._format_vsa(context, vsa) for vsa in vsas]}
-
-    def create_drive_type(self, context, **kwargs):
-        name = kwargs.get('name')
-        type = kwargs.get('type')
-        size_gb = int(kwargs.get('size_gb'))
-        rpm = kwargs.get('rpm')
-        capabilities = kwargs.get('capabilities')
-        visible = kwargs.get('visible', True)
-
-        LOG.audit(_("Create Drive Type %(name)s: %(type)s %(size_gb)d "\
-                    "%(rpm)s %(capabilities)s %(visible)s"),
-                  locals())
-
-        rv = drive_types.create(context, type, size_gb, rpm,
-                                capabilities, visible, name)
-        return {'driveTypeSet': [dict(rv)]}
-
-    def update_drive_type(self, context, name, **kwargs):
-        LOG.audit(_("Update Drive Type %s"), name)
-
-        dtype = drive_types.get_by_name(context, name)
-
-        updatable_fields = ['type',
-                            'size_gb',
-                            'rpm',
-                            'capabilities',
-                            'visible']
-        changes = {}
-        for field in updatable_fields:
-            if field in kwargs and \
-               kwargs[field] is not None and \
-               kwargs[field] != '':
-                changes[field] = kwargs[field]
-
-        if changes:
-            drive_types.update(context, dtype['id'], **changes)
-        return True
-
-    def rename_drive_type(self, context, name, new_name):
-        drive_types.rename(context, name, new_name)
-        return True
-
-    def delete_drive_type(self, context, name):
-        dtype = drive_types.get_by_name(context, name)
-        drive_types.delete(context, dtype['id'])
-        return True
-
-    def describe_drive_types(self, context, names=None, visible=True):
-
-        drives = []
-        if names is not None:
-            for name in names:
-                drive = drive_types.get_by_name(context, name)
-                if drive['visible'] == visible:
-                    drives.append(drive)
-        else:
-            drives = drive_types.get_all(context, visible)
-        # (VP-TMP): Change to EC2 compliant output later
-        return {'driveTypeSet': [dict(drive) for drive in drives]}
-
     @staticmethod
     def _convert_to_set(lst, label):
         if lst is None or lst == []:
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index 3588e58cc..19185b907 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -159,7 +159,7 @@ class API(base.Base):
             shared = True
 
         # check if image is ready before starting any work
-        if image_name is None or image_name == '':
+        if image_name is None:
             image_name = FLAGS.vc_image_name
         try:
             image_service = self.compute_api.image_service
diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py
index 86ff76b96..3c67fdbb9 100644
--- a/nova/vsa/drive_types.py
+++ b/nova/vsa/drive_types.py
@@ -64,8 +64,23 @@ def create(context, type, size_gb, rpm, capabilities='',
 
 
 def update(context, id, **kwargs):
-    LOG.debug(_("Updating drive type with id %(id)s"), locals())
-    return db.drive_type_update(context, id, kwargs)
+
+    LOG.debug(_("Updating drive type with id %(id)s: %(kwargs)s"), locals())
+
+    updatable_fields = ['type',
+                        'size_gb',
+                        'rpm',
+                        'capabilities',
+                        'visible']
+    changes = {}
+    for field in updatable_fields:
+        if field in kwargs and \
+           kwargs[field] is not None and \
+           kwargs[field] != '':
+            changes[field] = kwargs[field]
+
+    # call update regardless of whether changes is empty or not
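+    # (illustrative sketch: assuming an existing drive type with id=1,
+    #      update(ctx, 1, rpm=15000, size_gb=None, capabilities='')
+    #  leaves changes == {'rpm': 15000} -- None and '' values are filtered
+    #  out above, so an update cannot blank existing fields by accident)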
+ return db.drive_type_update(context, id, changes) def rename(context, name, new_name=None): -- cgit From 711a02450d24ba7385f2f22bf70a60ecfb452cfc Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 12 Aug 2011 13:37:22 -0700 Subject: nova-manage: fixed instance type in vsa creation --- bin/nova-manage | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 3b0bf47e2..dafcd5de0 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -1124,6 +1124,8 @@ class VsaCommands(object): if instance_type_name == '': instance_type_name = None + instance_type = instance_types.get_instance_type_by_name( + instance_type_name) if image_name == '': image_name = None @@ -1140,7 +1142,7 @@ class VsaCommands(object): 'display_name': name, 'display_description': description, 'vc_count': int(vc_count), - 'instance_type': instance_type_name, + 'instance_type': instance_type, 'image_name': image_name, 'availability_zone': az, 'storage': storage_list, -- cgit From a81aae612f409bc767af3013eeccb71226831fc2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 17 Aug 2011 11:19:34 -0400 Subject: Add modules for task and vm states. --- nova/compute/task_state.py | 28 ++++++++++++++++++++++++++++ nova/compute/vm_state.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 nova/compute/task_state.py create mode 100644 nova/compute/vm_state.py diff --git a/nova/compute/task_state.py b/nova/compute/task_state.py new file mode 100644 index 000000000..b4dc9af51 --- /dev/null +++ b/nova/compute/task_state.py @@ -0,0 +1,28 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible task states for instances""" + +BUILD_BLOCK_DEVICE_MAPPING='block_device_mapping' +NETWORKING='networking' + +PASSWORD='password' + +RESIZE_PREP='resize_prep' +RESIZE_MIGRATING='resize_migrating' +RESIZE_MIGRATED='resize_migrated' +RESIZE_FINISH='resize_finish' diff --git a/nova/compute/vm_state.py b/nova/compute/vm_state.py new file mode 100644 index 000000000..e81cba1f0 --- /dev/null +++ b/nova/compute/vm_state.py @@ -0,0 +1,35 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
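+#
+# These constants are plain strings, so callers compare them directly
+# against the instances.vm_state column. A sketch of the intended
+# consumption, assuming the compute API changes later in this series:
+#
+#     from nova.compute import vm_state
+#
+#     if instance['vm_state'] == vm_state.STOP:
+#         pass  # e.g. only a stopped instance may be start()-ed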
+ +"""Possible vm states for instances""" + +BUILD='build' +REBUILD='rebuild' +REBOOT='reboot' +DELETE='delete' +STOP='stop' +START='start' +RESIZE='resize' +VERIFY_RESIZE='verify_resize' +PAUSE='pause' +UNPAUSE='unpause' + +SUSPEND='suspend' +RESUME='resume' + +RESCUE='rescue' +UNRESCUE='unrescue' -- cgit From 5270b0a565ec26d2f7de3a7d95be86433d8c6bd2 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 17 Aug 2011 11:46:28 -0400 Subject: Split set state into vm, task, and power state functions. --- nova/db/sqlalchemy/api.py | 46 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 57a4370d8..07207b8ee 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1439,15 +1439,43 @@ def instance_get_floating_address(context, instance_id): @require_admin_context -def instance_set_state(context, instance_id, state, description=None): - # TODO(devcamcar): Move this out of models and into driver - from nova.compute import power_state - if not description: - description = power_state.name(state) - db.instance_update(context, - instance_id, - {'state': state, - 'state_description': description}) +def instance_set_power_state(context, instance_id, power_state): + session = get_session() + partial = session.query(models.Instance) + + if utils.is_uuid_like(instance_id): + result = partial.filter_by(uuid=instance_id) + else: + result = partial.filter_by(id=instance_id) + + result.update({'power_state': power_state}) + + +@require_admin_context +def instance_set_vm_state(context, instance_id, vm_state): + # vm_state = running, halted, suspended, paused + session = get_session() + partial = session.query(models.Instance) + + if utils.is_uuid_like(instance_id): + result = partial.filter_by(uuid=instance_id) + else: + result = partial.filter_by(id=instance_id) + + result.update({'vm_state': vm_state}) + + +def instance_set_task_state(context, instance_id, task_state): + # task_state = running, halted, suspended, paused + session = get_session() + partial = session.query(models.Instance) + + if utils.is_uuid_like(instance_id): + result = partial.filter_by(uuid=instance_id) + else: + result = partial.filter_by(id=instance_id) + + result.update({'task_state': task_state}) @require_context -- cgit From 6515b115de8cd026ea88aab796d4364ccc2ac4f0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Wed, 17 Aug 2011 11:51:39 -0400 Subject: Pep8 fixes. 
---
 nova/db/sqlalchemy/api.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 07207b8ee..e7d02cb5d 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1442,7 +1442,7 @@ def instance_get_floating_address(context, instance_id):
 def instance_set_power_state(context, instance_id, power_state):
     session = get_session()
     partial = session.query(models.Instance)
-    
+
     if utils.is_uuid_like(instance_id):
         result = partial.filter_by(uuid=instance_id)
     else:
@@ -1456,7 +1456,7 @@ def instance_set_vm_state(context, instance_id, vm_state):
     # vm_state values are defined in nova.compute.vm_state
     session = get_session()
     partial = session.query(models.Instance)
-    
+
     if utils.is_uuid_like(instance_id):
         result = partial.filter_by(uuid=instance_id)
     else:
@@ -1469,7 +1469,7 @@ def instance_set_task_state(context, instance_id, task_state):
     # task_state values are defined in nova.compute.task_state
     session = get_session()
     partial = session.query(models.Instance)
-    
+
     if utils.is_uuid_like(instance_id):
         result = partial.filter_by(uuid=instance_id)
     else:
-- cgit
From bd2e98c064b7c1e9c866f3013e13af7883e11e05 Mon Sep 17 00:00:00 2001
From: Dan Prince
Date: Wed, 17 Aug 2011 13:30:47 -0400
Subject: Initial instance states migration.

---
 .../versions/037_update_instance_states.py | 57 ++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py

diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py
new file mode 100644
index 000000000..0bbe39def
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py
@@ -0,0 +1,57 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, MetaData, String, Table
+
+meta = MetaData()
+
+c_task_state = Column('task_state',
+                      String(length=255, convert_unicode=False,
+                             assert_unicode=None, unicode_error=None,
+                             _warn_on_bytestring=False),
+                      nullable=True)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From 1d1d027554d6be355bd9b52b2d87081d06f05045 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 17 Aug 2011 16:23:40 -0400 Subject: Updated compute manager/API to use vm/task states. Updated vm/task states to cover a few more cases I encountered. --- nova/compute/api.py | 57 ++++-- nova/compute/manager.py | 441 ++++++++++++++++++++++++--------------------- nova/compute/task_state.py | 17 +- nova/compute/vm_state.py | 8 +- 4 files changed, 296 insertions(+), 227 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index e909e9959..ec760853e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,6 +36,7 @@ from nova import utils from nova import volume from nova.compute import instance_types from nova.compute import power_state +from nova.compute import vm_state from nova.compute.utils import terminate_volumes from nova.scheduler import api as scheduler_api from nova.db import base @@ -74,10 +75,13 @@ def generate_default_hostname(instance): def _is_able_to_shutdown(instance, instance_id): - states = {'terminating': "Instance %s is already being terminated", - 'migrating': "Instance %s is being migrated", - 'stopping': "Instance %s is being stopped"} - msg = states.get(instance['state_description']) + states = { + vm_state.DELETE: "Instance %s is already being terminated", + vm_state.MIGRATE: "Instance %s is being migrated", + vm_state.RESIZE: "Instance %s is being resized", + vm_state.STOP: "Instance %s is being stopped", + } + msg = states.get(instance['vm_state']) if msg: LOG.warning(_(msg), instance_id) return False @@ -231,8 +235,8 @@ class API(base.Base): 'image_ref': image_href, 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', - 'state': 0, - 'state_description': 'scheduling', + 'power_state': power_state.NOSTATE, + 'vm_state': vm_state.BUILD, 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), @@ -648,9 +652,8 @@ class API(base.Base): return self.update(context, - instance['id'], - state_description='terminating', - state=0, + instance_id, + vm_state=vm_state.DELETE, terminated_at=utils.utcnow()) host = instance['host'] @@ -671,9 +674,8 @@ class API(base.Base): return self.update(context, - instance['id'], - state_description='stopping', - state=power_state.NOSTATE, + instance_id, + vm_state=vm_state.STOP, terminated_at=utils.utcnow()) host = instance['host'] @@ -685,12 +687,15 @@ class API(base.Base): """Start an instance.""" LOG.debug(_("Going to try to start %s"), instance_id) instance = self._get_instance(context, instance_id, 'starting') - if instance['state_description'] != 'stopped': - _state_description = instance['state_description'] + vm_state = instance["vm_state"] + + if vm_state != vm_state.STOP: LOG.warning(_("Instance 
%(instance_id)s is not " - "stopped(%(_state_description)s)") % locals()) + "stopped. (%(vm_state)s)") % locals()) return + self.update(context, instance_id, vm_state=vm_state.ACTIVE) + # TODO(yamahata): injected_files isn't supported right now. # It is used only for osapi. not for ec2 api. # availability_zone isn't used by run_instance. @@ -918,6 +923,7 @@ class API(base.Base): @scheduler_api.reroute_compute("reboot") def reboot(self, context, instance_id): """Reboot the given instance.""" + self.update(context, instance_id, vm_state=vm_state.REBOOT) self._cast_compute_message('reboot_instance', context, instance_id) @scheduler_api.reroute_compute("rebuild") @@ -925,8 +931,12 @@ class API(base.Base): metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) + invalid_rebuild_states = [ + vm_state.BUILD, + vm_state.REBUILD, + ] - if instance["state"] == power_state.BUILDING: + if instance["vm_state"] in invalid_rebuild_states: msg = _("Instance already building") raise exception.BuildInProgress(msg) @@ -946,6 +956,8 @@ class API(base.Base): "injected_files": files_to_inject, } + self.update(context, instance_id, vm_state=vm_state.REBUILD) + self._cast_compute_message('rebuild_instance', context, instance_id, @@ -963,6 +975,8 @@ class API(base.Base): raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') + self.update(context, instance_id, vm_state=vm_state.ACTIVE) + params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance_ref['uuid'], @@ -983,6 +997,9 @@ class API(base.Base): if not migration_ref: raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') + + self.update(context, instance_id, vm_state=vm_state.ACTIVE) + params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance_ref['uuid'], @@ -1028,6 +1045,8 @@ class API(base.Base): if (current_memory_mb == new_memory_mb) and flavor_id: raise exception.CannotResizeToSameSize() + self.update(context, instance_id, vm_state=vm_state.RESIZE) + instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, {"method": "prep_resize", @@ -1061,11 +1080,13 @@ class API(base.Base): @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): """Pause the given instance.""" + self.update(context, instance_id, vm_state=vm_state.PAUSE) self._cast_compute_message('pause_instance', context, instance_id) @scheduler_api.reroute_compute("unpause") def unpause(self, context, instance_id): """Unpause the given instance.""" + self.update(context, instance_id, vm_state=vm_state.ACTIVE) self._cast_compute_message('unpause_instance', context, instance_id) def set_host_enabled(self, context, host, enabled): @@ -1092,21 +1113,25 @@ class API(base.Base): @scheduler_api.reroute_compute("suspend") def suspend(self, context, instance_id): """Suspend the given instance.""" + self.update(context, instance_id, vm_state=vm_state.SUSPEND) self._cast_compute_message('suspend_instance', context, instance_id) @scheduler_api.reroute_compute("resume") def resume(self, context, instance_id): """Resume the given instance.""" + self.update(context, instance_id, vm_state=vm_state.ACTIVE) self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") def rescue(self, context, instance_id): """Rescue the given instance.""" + 
self.update(context, instance_id, vm_state=vm_state.RESCUE) self._cast_compute_message('rescue_instance', context, instance_id) @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): """Unrescue the given instance.""" + self.update(context, instance_id, vm_state=vm_state.ACTIVE) self._cast_compute_message('unrescue_instance', context, instance_id) @scheduler_api.reroute_compute("set_admin_password") diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3299268f2..34c6bc1ea 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -56,6 +56,8 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state +from nova.compute import task_state +from nova.compute import vm_state from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -146,6 +148,10 @@ class ComputeManager(manager.SchedulerDependentManager): super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) + def _instance_update(self, context, instance_id, **kwargs): + """Update an instance in the database using kwargs as value.""" + return self.db.instance_update(context, instance_id, kwargs) + def init_host(self): """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) @@ -153,8 +159,8 @@ class ComputeManager(manager.SchedulerDependentManager): instances = self.db.instance_get_all_by_host(context, self.host) for instance in instances: inst_name = instance['name'] - db_state = instance['state'] - drv_state = self._update_state(context, instance['id']) + db_state = instance['power_state'] + drv_state = self._get_power_state(context, instance) expect_running = db_state == power_state.RUNNING \ and drv_state != db_state @@ -177,34 +183,13 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.warning(_('Hypervisor driver does not ' 'support firewall rules')) - def _update_state(self, context, instance_id, state=None): - """Update the state of an instance from the driver info.""" - instance_ref = self.db.instance_get(context, instance_id) - - if state is None: - try: - LOG.debug(_('Checking state of %s'), instance_ref['name']) - info = self.driver.get_info(instance_ref['name']) - except exception.NotFound: - info = None - - if info is not None: - state = info['state'] - else: - state = power_state.FAILED - - self.db.instance_set_state(context, instance_id, state) - return state - - def _update_launched_at(self, context, instance_id, launched_at=None): - """Update the launched_at parameter of the given instance.""" - data = {'launched_at': launched_at or utils.utcnow()} - self.db.instance_update(context, instance_id, data) - - def _update_image_ref(self, context, instance_id, image_ref): - """Update the image_id for the given instance.""" - data = {'image_ref': image_ref} - self.db.instance_update(context, instance_id, data) + def _get_power_state(self, context, instance): + """Retrieve the power state for the given instance.""" + LOG.debug(_('Checking state of %s'), instance['name']) + try: + return self.driver.get_info(instance['name'])["state"] + except exception.NotFound: + return power_state.FAILED def get_console_topic(self, context, **kwargs): """Retrieves the console host for a project on this host. 
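Two notes on this commit. First, in the compute API's start() hunk above, the local assignment vm_state = instance["vm_state"] shadows the imported nova.compute.vm_state module, so the vm_state.STOP comparison and the later vm_state=vm_state.ACTIVE keyword would raise AttributeError on a plain string; the local variable needs a different name (say, a hypothetical current_vm_state). Second, the manager hunks below all apply one idiom: read the authoritative power state back from the virt driver, then persist all three state axes in a single _instance_update call. In outline (a sketch of the recurring pattern, built from names this commit introduces):

    current_power_state = self._get_power_state(context, instance_ref)
    self._instance_update(context,
                          instance_id,
                          power_state=current_power_state,
                          vm_state=vm_state.ACTIVE,
                          task_state=None)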
@@ -388,13 +373,10 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(vish): used by virt but not in database updates['injected_files'] = kwargs.get('injected_files', []) updates['admin_pass'] = kwargs.get('admin_password', None) - instance = self.db.instance_update(context, - instance_id, - updates) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'networking') + updates['vm_state'] = vm_state.BUILD + updates['task_state'] = task_state.NETWORKING + + instance = self.db.instance_update(context, instance_id, updates) is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id) try: @@ -413,6 +395,11 @@ class ComputeManager(manager.SchedulerDependentManager): # all vif creation and network injection, maybe this is correct network_info = [] + self._instance_update(context, + instance_id, + vm_state=vm_state.BUILD, + task_state=task_state.BLOCK_DEVICE_MAPPING) + (swap, ephemerals, block_device_mapping) = self._setup_block_device_mapping( context, instance_id) @@ -422,9 +409,11 @@ class ComputeManager(manager.SchedulerDependentManager): 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} - # TODO(vish) check to make sure the availability zone matches - self._update_state(context, instance_id, power_state.BUILDING) + self._instance_update(context, + instance_id, + task_state=task_state.SPAWN) + # TODO(vish) check to make sure the availability zone matches try: self.driver.spawn(context, instance, network_info, block_device_info) @@ -433,13 +422,21 @@ class ComputeManager(manager.SchedulerDependentManager): "virtualization enabled in the BIOS? Details: " "%(ex)s") % locals() LOG.exception(msg) + return + + current_power_state = self._get_power_state(context, instance) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None, + launched_at=utils.utcnow()) - self._update_launched_at(context, instance_id) - self._update_state(context, instance_id) usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.create', notifier.INFO, usage_info) + except exception.InstanceNotFound: # FIXME(wwolf): We are just ignoring InstanceNotFound # exceptions here in case the instance was immediately @@ -523,11 +520,22 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Rebuilding instance %s"), instance_id, context=context) - self._update_state(context, instance_id, power_state.BUILDING) + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.REBUILD, + task_state=task_state.REBUILDING) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.destroy(instance_ref, network_info) + + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.REBUILD, + task_state=task_state.SPAWN) + image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) @@ -536,9 +544,15 @@ class ComputeManager(manager.SchedulerDependentManager): bd_mapping = self._setup_block_device_mapping(context, instance_id) self.driver.spawn(context, instance_ref, network_info, bd_mapping) - self._update_image_ref(context, instance_id, image_ref) - self._update_launched_at(context, instance_id) - self._update_state(context, instance_id) + 
current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None, + image_ref=image_ref, + launched_at=utils.utcnow()) + usage_info = utils.usage_from_instance(instance_ref, image_ref=image_ref) notifier.notify('compute.%s' % self.host, @@ -550,26 +564,34 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this host.""" + LOG.audit(_("Rebooting instance %s"), instance_id, context=context) context = context.elevated() - self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("Rebooting instance %s"), instance_id, context=context) - if instance_ref['state'] != power_state.RUNNING: - state = instance_ref['state'] + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.REBOOT, + task_state=task_state.REBOOTING) + + if instance_ref['power_state'] != power_state.RUNNING: + state = instance_ref['power_state'] running = power_state.RUNNING LOG.warn(_('trying to reboot a non-running ' 'instance: %(instance_id)s (state: %(state)s ' 'expected: %(running)s)') % locals(), context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'rebooting') network_info = self._get_instance_nw_info(context, instance_ref) self.driver.reboot(instance_ref, network_info) - self._update_state(context, instance_id) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def snapshot_instance(self, context, instance_id, image_id, @@ -585,37 +607,41 @@ class ComputeManager(manager.SchedulerDependentManager): :param rotation: int representing how many backups to keep around; None if rotation shouldn't be used (as in the case of snapshots) """ + if image_type != "snapshot" and image_type != "backup": + raise Exception(_('Image type not recognized %s') % image_type) + context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - #NOTE(sirp): update_state currently only refreshes the state field - # if we add is_snapshotting, we will need this refreshed too, - # potentially? 
- self._update_state(context, instance_id) + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=image_type) LOG.audit(_('instance %s: snapshotting'), instance_id, context=context) - if instance_ref['state'] != power_state.RUNNING: - state = instance_ref['state'] + + if instance_ref['power_state'] != power_state.RUNNING: + state = instance_ref['power_state'] running = power_state.RUNNING LOG.warn(_('trying to snapshot a non-running ' 'instance: %(instance_id)s (state: %(state)s ' 'expected: %(running)s)') % locals()) self.driver.snapshot(context, instance_ref, image_id) + self._instance_update(context, instance_id, task_state=None) + + if image_type == 'snapshot' and rotation: + raise exception.ImageRotationNotAllowed() + + elif image_type == 'backup' and rotation: + instance_uuid = instance_ref['uuid'] + self.rotate_backups(context, instance_uuid, backup_type, rotation) - if image_type == 'snapshot': - if rotation: - raise exception.ImageRotationNotAllowed() elif image_type == 'backup': - if rotation: - instance_uuid = instance_ref['uuid'] - self.rotate_backups(context, instance_uuid, backup_type, - rotation) - else: - raise exception.RotationRequiredForBackup() - else: - raise Exception(_('Image type not recognized %s') % image_type) + raise exception.RotationRequiredForBackup() def rotate_backups(self, context, instance_uuid, backup_type, rotation): """Delete excess backups associated to an instance. @@ -751,40 +777,51 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this host.""" + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.RESCUE, + task_state=task_state.RESCUING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: rescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'rescuing') - _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result) network_info = self._get_instance_nw_info(context, instance_ref) - self.driver.rescue(context, instance_ref, _update_state, network_info) - self._update_state(context, instance_id) + + # NOTE(blamar): None of the virt drivers use the 'callback' param + self.driver.rescue(context, instance_ref, None, network_info) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + vm_state=vm_state.RESCUE, + task_state=task_state.RESCUED, + power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unrescue_instance(self, context, instance_id): """Rescue an instance on this host.""" + LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=task_state.UNRESCUING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'unrescuing') - _update_state = lambda result: self._update_state_callback( - self, context, instance_id, result) network_info 
= self._get_instance_nw_info(context, instance_ref) - self.driver.unrescue(instance_ref, _update_state, network_info) - self._update_state(context, instance_id) - @staticmethod - def _update_state_callback(self, context, instance_id, result): - """Update instance state when async task completes.""" - self._update_state(context, instance_id) + # NOTE(blamar): None of the virt drivers use the 'callback' param + self.driver.unrescue(instance_ref, None, network_info) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=None, + power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock @@ -843,11 +880,12 @@ class ComputeManager(manager.SchedulerDependentManager): # Just roll back the record. There's no need to resize down since # the 'old' VM already has the preferred attributes - self.db.instance_update(context, instance_ref['uuid'], - dict(memory_mb=instance_type['memory_mb'], - vcpus=instance_type['vcpus'], - local_gb=instance_type['local_gb'], - instance_type_id=instance_type['id'])) + self._instance_update(context, + instance_ref["uuid"], + memory_mb=instance_type['memory_mb'], + vcpus=instance_type['vcpus'], + local_gb=instance_type['local_gb'], + instance_type_id=instance_type['id']) self.driver.revert_migration(instance_ref) self.db.migration_update(context, migration_id, @@ -1000,35 +1038,45 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this host.""" + LOG.audit(_('instance %s: pausing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.PAUSE, + task_state=task_state.PAUSING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: pausing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'pausing') - self.driver.pause(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.pause(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.PAUSE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this host.""" + LOG.audit(_('instance %s: unpausing'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=task_state.UNPAUSING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: unpausing'), instance_id, context=context) - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'unpausing') - self.driver.unpause(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.unpause(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def 
host_power_action(self, context, host=None, action=None): @@ -1053,33 +1101,45 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def suspend_instance(self, context, instance_id): """Suspend the given instance.""" + LOG.audit(_('instance %s: suspending'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.SUSPEND, + task_state=task_state.SUSPENDING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: suspending'), instance_id, context=context) - self.db.instance_set_state(context, instance_id, - power_state.NOSTATE, - 'suspending') - self.driver.suspend(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.suspend(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.SUSPEND, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock def resume_instance(self, context, instance_id): """Resume the given suspended instance.""" + LOG.audit(_('instance %s: resuming'), instance_id, context=context) context = context.elevated() + + self._instance_update(context, + instance_id, + vm_state=vm_state.ACTIVE, + task_state=task_state.RESUMING) + instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_('instance %s: resuming'), instance_id, context=context) - self.db.instance_set_state(context, instance_id, - power_state.NOSTATE, - 'resuming') - self.driver.resume(instance_ref, - lambda result: self._update_state_callback(self, - context, - instance_id, - result)) + self.driver.resume(instance_ref, lambda result: None) + + current_power_state = self._get_power_state(context, instance_ref) + self._instance_update(context, + instance_id, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) def lock_instance(self, context, instance_id): @@ -1489,11 +1549,14 @@ class ComputeManager(manager.SchedulerDependentManager): 'block_migration': block_migration}}) # Restore instance state - self.db.instance_update(ctxt, - instance_ref['id'], - {'state_description': 'running', - 'state': power_state.RUNNING, - 'host': dest}) + current_power_state = self._get_power_state(ctxt, instance_ref) + self._instance_update(ctxt, + instance_ref["id"], + host=dest, + power_state=current_power_state, + vm_state=vm_state.ACTIVE, + task_state=None) + # Restore volume state for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] @@ -1539,11 +1602,11 @@ class ComputeManager(manager.SchedulerDependentManager): This param specifies destination host. 
""" host = instance_ref['host'] - self.db.instance_update(context, - instance_ref['id'], - {'state_description': 'running', - 'state': power_state.RUNNING, - 'host': host}) + self._instance_update(context, + instance_ref['id'], + host=host, + vm_state=vm_state.ACTIVE, + task_state=None) for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] @@ -1591,10 +1654,9 @@ class ComputeManager(manager.SchedulerDependentManager): error_list.append(ex) try: - self._poll_instance_states(context) + self._sync_power_states(context) except Exception as ex: - LOG.warning(_("Error during instance poll: %s"), - unicode(ex)) + LOG.warning(_("Error during power_state sync: %s"), unicode(ex)) error_list.append(ex) return error_list @@ -1609,68 +1671,39 @@ class ComputeManager(manager.SchedulerDependentManager): self.update_service_capabilities( self.driver.get_host_stats(refresh=True)) - def _poll_instance_states(self, context): - vm_instances = self.driver.list_instances_detail() - vm_instances = dict((vm.name, vm) for vm in vm_instances) + def _sync_power_states(self, context): + """Align power states between the database and the hypervisor. - # Keep a list of VMs not in the DB, cross them off as we find them - vms_not_found_in_db = list(vm_instances.keys()) + The hypervisor is authoritative for the power_state data, so we + simply loop over all known instances for this host and update the + power_state according to the hypervisor. If the instance is not found + then it will be set to power_state.NOSTATE, because it doesn't exist + on the hypervisor. + """ + vm_instances = self.driver.list_instances_detail() db_instances = self.db.instance_get_all_by_host(context, self.host) + num_vm_instances = len(vm_instances) + num_db_instances = len(db_instances) + + if num_vm_instances != num_db_instances: + LOG.info(_("Found %(num_db_instances)s in the database and " + "%(num_vm_instances)s on the hypervisor.") % locals()) + for db_instance in db_instances: - name = db_instance['name'] - db_state = db_instance['state'] + name = db_instance["name"] + db_power_state = db_instance['power_state'] vm_instance = vm_instances.get(name) if vm_instance is None: - # NOTE(justinsb): We have to be very careful here, because a - # concurrent operation could be in progress (e.g. a spawn) - if db_state == power_state.BUILDING: - # TODO(justinsb): This does mean that if we crash during a - # spawn, the machine will never leave the spawning state, - # but this is just the way nova is; this function isn't - # trying to correct that problem. - # We could have a separate task to correct this error. - # TODO(justinsb): What happens during a live migration? - LOG.info(_("Found instance '%(name)s' in DB but no VM. " - "State=%(db_state)s, so assuming spawn is in " - "progress.") % locals()) - vm_state = db_state - else: - LOG.info(_("Found instance '%(name)s' in DB but no VM. " - "State=%(db_state)s, so setting state to " - "shutoff.") % locals()) - vm_state = power_state.SHUTOFF - if db_instance['state_description'] == 'stopping': - self.db.instance_stop(context, db_instance['id']) - continue + vm_power_state = power_state.NOSTATE else: - vm_state = vm_instance.state - vms_not_found_in_db.remove(name) - - if (db_instance['state_description'] in ['migrating', 'stopping']): - # A situation which db record exists, but no instance" - # sometimes occurs while live-migration at src compute, - # this case should be ignored. 
- LOG.debug(_("Ignoring %(name)s, as it's currently being " - "migrated.") % locals()) - continue - - if vm_state != db_state: - LOG.info(_("DB/VM state mismatch. Changing state from " - "'%(db_state)s' to '%(vm_state)s'") % locals()) - self._update_state(context, db_instance['id'], vm_state) + vm_power_state = vm_instance["power_state"] - # NOTE(justinsb): We no longer auto-remove SHUTOFF instances - # It's quite hard to get them back when we do. - - # Are there VMs not in the DB? - for vm_not_found_in_db in vms_not_found_in_db: - name = vm_not_found_in_db + if vm_power_state == db_power_state: + continue - # We only care about instances that compute *should* know about - if name.startswith("instance-"): - # TODO(justinsb): What to do here? Adopt it? Shut it down? - LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring") - % locals()) + self._instance_update(context, + db_instance["id"], + power_state=vm_power_state) diff --git a/nova/compute/task_state.py b/nova/compute/task_state.py index b4dc9af51..55466c783 100644 --- a/nova/compute/task_state.py +++ b/nova/compute/task_state.py @@ -17,12 +17,27 @@ """Possible task states for instances""" -BUILD_BLOCK_DEVICE_MAPPING='block_device_mapping' +BLOCK_DEVICE_MAPPING='block_device_mapping' NETWORKING='networking' +SPAWN='spawn' +SNAPSHOT='snapshot' +BACKUP='backup' PASSWORD='password' RESIZE_PREP='resize_prep' RESIZE_MIGRATING='resize_migrating' RESIZE_MIGRATED='resize_migrated' RESIZE_FINISH='resize_finish' + +REBUILDING='rebuilding' + +REBOOTING='rebooting' +PAUSING='pausing' +UNPAUSING='unpausing' +SUSPENDING='suspending' +RESUMING='resuming' + +RESCUING='rescuing' +RESCUED='rescued' +UNRESCUING='unrescuing' diff --git a/nova/compute/vm_state.py b/nova/compute/vm_state.py index e81cba1f0..a1bca6ef4 100644 --- a/nova/compute/vm_state.py +++ b/nova/compute/vm_state.py @@ -17,19 +17,15 @@ """Possible vm states for instances""" +ACTIVE='active' BUILD='build' REBUILD='rebuild' REBOOT='reboot' DELETE='delete' STOP='stop' -START='start' +MIGRATE='migrate' RESIZE='resize' VERIFY_RESIZE='verify_resize' PAUSE='pause' -UNPAUSE='unpause' - SUSPEND='suspend' -RESUME='resume' - RESCUE='rescue' -UNRESCUE='unrescue' -- cgit From cabf9cc8f29ad8c99971c434516e1b911f07f32f Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 17 Aug 2011 16:27:12 -0700 Subject: nova-manage VSA print & forced update_cap changes; fixed bug with report capabilities; added IP address to VSA APIs; added instances to APIs --- bin/nova-manage | 196 ++++++++++++++++++--- .../openstack/contrib/virtual_storage_arrays.py | 107 ++++++++++- nova/tests/api/openstack/contrib/test_vsa.py | 2 + nova/volume/manager.py | 12 +- 4 files changed, 282 insertions(+), 35 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index dafcd5de0..2b9bc48b8 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -96,6 +96,8 @@ from nova.auth import manager from nova.cloudpipe import pipelib from nova.compute import instance_types from nova.db import migration +from nova import compute +from nova import volume from nova import vsa from nova.vsa import drive_types @@ -1032,33 +1034,153 @@ class VsaCommands(object): def __init__(self, *args, **kwargs): self.manager = manager.AuthManager() self.vsa_api = vsa.API() + self.compute_api = compute.API() + self.volume_api = volume.API() self.context = context.get_admin_context() - def _list(self, vsas): - format_str = "%-5s %-15s %-25s %-10s %-6s %-9s %-10s %-10s %10s" - if len(vsas): - print format_str %\ - (_('ID'), - _('vsa_id'), - _('displayName'), - 
_('vc_type'), - _('vc_cnt'), - _('drive_cnt'), - _('status'), - _('AZ'), - _('createTime')) + self._format_str_vsa = "%-5s %-15s %-25s %-10s %-6s "\ + "%-9s %-10s %-10s %10s" + self._format_str_volume = "\t%-4s %-15s %-5s %-10s %-20s %s" + self._format_str_drive = "\t%-4s %-15s %-5s %-10s %-20s %s" + self._format_str_instance = "\t%-4s %-10s %-20s %-12s %-10s "\ + "%-15s %-15s %-10s %-15s %s" + + def _print_vsa_header(self): + print self._format_str_vsa %\ + (_('ID'), + _('vsa_id'), + _('displayName'), + _('vc_type'), + _('vc_cnt'), + _('drive_cnt'), + _('status'), + _('AZ'), + _('createTime')) + + def _print_vsa(self, vsa): + print self._format_str_vsa %\ + (vsa['id'], + vsa['name'], + vsa['display_name'], + vsa['vsa_instance_type'].get('name', None), + vsa['vc_count'], + vsa['vol_count'], + vsa['status'], + vsa['availability_zone'], + str(vsa['created_at'])) + + def _print_volume_header(self): + print _(' === Volumes ===') + print self._format_str_volume %\ + (_('ID'), + _('name'), + _('size'), + _('status'), + _('attachment'), + _('createTime')) + + def _print_volume(self, vol): + print self._format_str_volume %\ + (vol['id'], + vol['display_name'], + vol['size'], + vol['status'], + vol['attach_status'], + str(vol['created_at'])) + + def _print_drive_header(self): + print _(' === Drives ===') + print self._format_str_drive %\ + (_('ID'), + _('name'), + _('size'), + _('status'), + _('host'), + _('createTime')) + + def _print_drive(self, drive): + print self._format_str_volume %\ + (drive['id'], + drive['display_name'], + drive['size'], + drive['status'], + drive['host'], + str(drive['created_at'])) + + def _print_instance_header(self): + print _(' === Instances ===') + print self._format_str_instance %\ + (_('ID'), + _('name'), + _('disp_name'), + _('image'), + _('type'), + _('floating_IP'), + _('fixed_IP'), + _('status'), + _('host'), + _('createTime')) + + def _print_instance(self, vc): + + fixed_addr = None + floating_addr = None + if vc['fixed_ips']: + fixed = vc['fixed_ips'][0] + fixed_addr = fixed['address'] + if fixed['floating_ips']: + floating_addr = fixed['floating_ips'][0]['address'] + floating_addr = floating_addr or fixed_addr + + print self._format_str_instance %\ + (vc['id'], + ec2utils.id_to_ec2_id(vc['id']), + vc['display_name'], + ('ami-%08x' % int(vc['image_ref'])), + vc['instance_type']['name'], + floating_addr, + fixed_addr, + vc['state_description'], + vc['host'], + str(vc['created_at'])) + + def _list(self, context, vsas, print_drives=False, + print_volumes=False, print_instances=False): + if vsas: + self._print_vsa_header() for vsa in vsas: - print format_str %\ - (vsa['id'], - vsa['name'], - vsa['display_name'], - vsa['vsa_instance_type'].get('name', None), - vsa['vc_count'], - vsa['vol_count'], - vsa['status'], - vsa['availability_zone'], - str(vsa['created_at'])) + self._print_vsa(vsa) + vsa_id = vsa.get('id') + + if print_instances: + instances = self.compute_api.get_all(context, + search_opts={'metadata': + dict(vsa_id=str(vsa_id))}) + if instances: + print + self._print_instance_header() + for instance in instances: + self._print_instance(instance) + print + + if print_drives: + drives = self.volume_api.get_all_by_vsa(context, + vsa_id, "to") + if drives: + self._print_drive_header() + for drive in drives: + self._print_drive(drive) + print + + if print_volumes: + volumes = self.volume_api.get_all_by_vsa(context, + vsa_id, "from") + if volumes: + self._print_volume_header() + for volume in volumes: + self._print_volume(volume) + print @args('--storage', 
dest='storage', metavar="[{'drive_name': 'type', 'num_drives': N, 'size': M},..]", @@ -1150,7 +1272,7 @@ class VsaCommands(object): } result = self.vsa_api.create(ctxt, **values) - self._list([result]) + self._list(ctxt, [result]) @args('--id', dest='vsa_id', metavar="", help='VSA ID') @args('--name', dest='name', metavar="", help='VSA name') @@ -1170,7 +1292,7 @@ class VsaCommands(object): vsa_id = ec2utils.ec2_id_to_id(vsa_id) result = self.vsa_api.update(self.context, vsa_id=vsa_id, **values) - self._list([result]) + self._list(self.context, [result]) @args('--id', dest='vsa_id', metavar="", help='VSA ID') def delete(self, vsa_id): @@ -1180,7 +1302,16 @@ class VsaCommands(object): @args('--id', dest='vsa_id', metavar="", help='VSA ID (optional)') - def list(self, vsa_id=None): + @args('--all', dest='all', action="store_true", + help='Show all available details') + @args('--drives', dest='drives', action="store_true", + help='Include drive-level details') + @args('--volumes', dest='volumes', action="store_true", + help='Include volume-level details') + @args('--instances', dest='instances', action="store_true", + help='Include instance-level details') + def list(self, vsa_id=None, all=False, + drives=False, volumes=False, instances=False): """Describe all available VSAs (or particular one).""" vsas = [] @@ -1191,7 +1322,18 @@ class VsaCommands(object): else: vsas = self.vsa_api.get_all(self.context) - self._list(vsas) + if all: + drives = volumes = instances = True + + self._list(self.context, vsas, drives, volumes, instances) + + def update_capabilities(self): + """Forces updates capabilities on all nova-volume nodes.""" + + rpc.fanout_cast(context.get_admin_context(), + FLAGS.volume_topic, + {"method": "notification", + "args": {"event": "startup"}}) class VsaDriveTypeCommands(object): diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 842573f8a..d6c4a5ef4 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -23,6 +23,7 @@ from webob import exc from nova import vsa from nova import volume +from nova import compute from nova import db from nova import quota from nova import exception @@ -31,6 +32,7 @@ from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import faults from nova.api.openstack import wsgi +from nova.api.openstack import servers from nova.api.openstack.contrib import volumes from nova.compute import instance_types @@ -40,7 +42,7 @@ FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api.vsa") -def _vsa_view(context, vsa, details=False): +def _vsa_view(context, vsa, details=False, instances=None): """Map keys for vsa summary/detailed view.""" d = {} @@ -55,11 +57,27 @@ def _vsa_view(context, vsa, details=False): if 'vsa_instance_type' in vsa: d['vcType'] = vsa['vsa_instance_type'].get('name', None) else: - d['vcType'] = None + d['vcType'] = vsa['instance_type_id'] d['vcCount'] = vsa.get('vc_count') d['driveCount'] = vsa.get('vol_count') + d['ipAddress'] = None + for instance in instances: + fixed_addr = None + floating_addr = None + if instance['fixed_ips']: + fixed = instance['fixed_ips'][0] + fixed_addr = fixed['address'] + if fixed['floating_ips']: + floating_addr = fixed['floating_ips'][0]['address'] + + if floating_addr: + d['ipAddress'] = floating_addr + break + else: + d['ipAddress'] = d['ipAddress'] or fixed_addr + return d @@ -79,10 +97,12 @@ class 
VsaController(object): "vcType", "vcCount", "driveCount", + "ipAddress", ]}}} def __init__(self): self.vsa_api = vsa.API() + self.compute_api = compute.API() super(VsaController, self).__init__() def _items(self, req, details): @@ -90,8 +110,13 @@ class VsaController(object): context = req.environ['nova.context'] vsas = self.vsa_api.get_all(context) limited_list = common.limited(vsas, req) - res = [_vsa_view(context, vsa, details) for vsa in limited_list] - return {'vsaSet': res} + + vsa_list = [] + for vsa in limited_list: + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) + vsa_list.append(_vsa_view(context, vsa, details, instances)) + return {'vsaSet': vsa_list} def index(self, req): """Return a short list of VSAs.""" @@ -110,7 +135,10 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'vsa': _vsa_view(context, vsa, details=True)} + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) + + return {'vsa': _vsa_view(context, vsa, True, instances)} def create(self, req, body): """Create a new VSA.""" @@ -140,9 +168,12 @@ class VsaController(object): availability_zone=vsa.get('placement', {}).\ get('AvailabilityZone')) - result = self.vsa_api.create(context, **args) + vsa = self.vsa_api.create(context, **args) + + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) - return {'vsa': _vsa_view(context, result, details=True)} + return {'vsa': _vsa_view(context, vsa, True, instances)} def delete(self, req, id): """Delete a VSA.""" @@ -405,6 +436,61 @@ class VsaVPoolController(object): return faults.Fault(exc.HTTPBadRequest()) +class VsaVCController(servers.ControllerV11): + """The VSA Virtual Controller API controller for the OpenStack API.""" + + def __init__(self): + self.vsa_api = vsa.API() + self.compute_api = compute.API() + self.vsa_id = None # VP-TODO: temporary ugly hack + super(VsaVCController, self).__init__() + + def _get_servers(self, req, is_detail): + """Returns a list of servers, taking into account any search + options specified. 
+ """ + + if self.vsa_id is None: + super(VsaVCController, self)._get_servers(req, is_detail) + + context = req.environ['nova.context'] + + search_opts = {'metadata': dict(vsa_id=str(self.vsa_id))} + instance_list = self.compute_api.get_all( + context, search_opts=search_opts) + + limited_list = self._limit_items(instance_list, req) + servers = [self._build_view(req, inst, is_detail)['server'] + for inst in limited_list] + return dict(servers=servers) + + def index(self, req, vsa_id): + """Return list of instances for particular VSA.""" + + LOG.audit(_("Index instances for VSA %s"), vsa_id) + + self.vsa_id = vsa_id # VP-TODO: temporary ugly hack + result = super(VsaVCController, self).detail(req) + self.vsa_id = None + return result + + def create(self, req, vsa_id, body): + """Create a new instance for VSA.""" + return faults.Fault(exc.HTTPBadRequest()) + + def update(self, req, vsa_id, id, body): + """Update VSA instance.""" + return faults.Fault(exc.HTTPBadRequest()) + + def delete(self, req, vsa_id, id): + """Delete VSA instance.""" + return faults.Fault(exc.HTTPBadRequest()) + + def show(self, req, vsa_id, id): + """Return data about the given instance.""" + return super(VsaVCController, self).show(req, id) + + class Virtual_storage_arrays(extensions.ExtensionDescriptor): def get_name(self): @@ -455,4 +541,11 @@ class Virtual_storage_arrays(extensions.ExtensionDescriptor): collection_name='zadr-vsa')) resources.append(res) + res = extensions.ResourceExtension('instances', + VsaVCController(), + parent=dict( + member_name='vsa', + collection_name='zadr-vsa')) + resources.append(res) + return resources diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py index 3c9136e14..a9b76b0ff 100644 --- a/nova/tests/api/openstack/contrib/test_vsa.py +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -46,6 +46,7 @@ def _get_default_vsa_param(): 'display_description': 'Test_VSA_description', 'vc_count': 1, 'instance_type': 'm1.small', + 'instance_type_id': 5, 'image_name': None, 'availability_zone': None, 'storage': [], @@ -58,6 +59,7 @@ def stub_vsa_create(self, context, **param): LOG.debug(_("_create: param=%s"), param) param['id'] = 123 param['name'] = 'Test name' + param['instance_type_id'] = 5 last_param = param return param diff --git a/nova/volume/manager.py b/nova/volume/manager.py index fd1d5acfa..b23bff1fc 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -61,6 +61,8 @@ flags.DEFINE_string('volume_driver', 'nova.volume.driver.ISCSIDriver', 'Driver to use for volume creation') flags.DEFINE_boolean('use_local_volumes', True, 'if True, will not discover local volumes') +flags.DEFINE_boolean('volume_force_update_capabilities', False, + 'if True will force update capabilities on each check') class VolumeManager(manager.SchedulerDependentManager): @@ -138,6 +140,7 @@ class VolumeManager(manager.SchedulerDependentManager): 'launched_at': now}) LOG.debug(_("volume %s: created successfully"), volume_ref['name']) self._notify_vsa(context, volume_ref, 'available') + self._reset_stats() return volume_id def _notify_vsa(self, context, volume_ref, status): @@ -158,6 +161,7 @@ class VolumeManager(manager.SchedulerDependentManager): if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) + self._reset_stats() try: LOG.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) @@ -265,6 +269,8 @@ class 
VolumeManager(manager.SchedulerDependentManager): return error_list def _volume_stats_changed(self, stat1, stat2): + if FLAGS.volume_force_update_capabilities: + return True if len(stat1) != len(stat2): return True for (k, v) in stat1.iteritems(): @@ -289,6 +295,10 @@ class VolumeManager(manager.SchedulerDependentManager): # avoid repeating fanouts self.update_service_capabilities(None) + def _reset_stats(self): + LOG.info(_("Clear capabilities")) + self._last_volume_stats = [] + def notification(self, context, event): LOG.info(_("Notification {%s} received"), event) - self._last_volume_stats = [] + self._reset_stats() -- cgit From cab13dbfd652d1fcf9443e796e50f7eb374fc3fc Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 18 Aug 2011 12:34:01 -0400 Subject: Updated a number of items to pave the way for new states. --- nova/compute/manager.py | 31 +++++++++++----------- nova/db/sqlalchemy/api.py | 4 +-- .../versions/037_update_instance_states.py | 2 +- nova/db/sqlalchemy/models.py | 16 +++-------- nova/scheduler/driver.py | 11 ++++---- nova/tests/scheduler/test_scheduler.py | 13 ++++++--- nova/tests/test_compute.py | 29 ++++++++++---------- 7 files changed, 51 insertions(+), 55 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 34c6bc1ea..cb19a19cc 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -241,11 +241,6 @@ class ComputeManager(manager.SchedulerDependentManager): def _setup_block_device_mapping(self, context, instance_id): """setup volumes for block device mapping""" - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'block_device_mapping') - volume_api = volume.API() block_device_mapping = [] swap = None @@ -472,8 +467,7 @@ class ComputeManager(manager.SchedulerDependentManager): for volume in volumes: self._detach_volume(context, instance_id, volume['id'], False) - if (instance['state'] == power_state.SHUTOFF and - instance['state_description'] != 'stopped'): + if instance['power_state'] == power_state.SHUTOFF: self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) @@ -532,16 +526,22 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - power_state=current_power_state, vm_state=vm_state.REBUILD, - task_state=task_state.SPAWN) + task_state=task_state.BLOCK_DEVICE_MAPPING) + + bd_mapping = self._setup_block_device_mapping(context, instance_id) image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) network_info = self.network_api.get_instance_nw_info(context, - instance_ref) - bd_mapping = self._setup_block_device_mapping(context, instance_id) + instance_ref) + + self._instance_update(context, + instance_id, + vm_state=vm_state.REBUILD, + task_state=task_state.SPAWN) + self.driver.spawn(context, instance_ref, network_info, bd_mapping) current_power_state = self._get_power_state(context, instance_ref) @@ -709,7 +709,7 @@ class ComputeManager(manager.SchedulerDependentManager): for i in xrange(max_tries): instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref["id"] - instance_state = instance_ref["state"] + instance_state = instance_ref["power_state"] expected_state = power_state.RUNNING if instance_state != expected_state: @@ -744,7 +744,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = 
self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] - instance_state = instance_ref['state'] + instance_state = instance_ref['power_state'] expected_state = power_state.RUNNING if instance_state != expected_state: LOG.warn(_('trying to inject a file into a non-running ' @@ -762,7 +762,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] - instance_state = instance_ref['state'] + instance_state = instance_ref['power_state'] expected_state = power_state.RUNNING if instance_state != expected_state: LOG.warn(_('trying to update agent on a non-running ' @@ -1092,7 +1092,7 @@ class ComputeManager(manager.SchedulerDependentManager): def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for an instance on this host.""" instance_ref = self.db.instance_get(context, instance_id) - if instance_ref["state"] == power_state.RUNNING: + if instance_ref["power_state"] == power_state.RUNNING: LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, context=context) return self.driver.get_diagnostics(instance_ref) @@ -1682,6 +1682,7 @@ class ComputeManager(manager.SchedulerDependentManager): """ vm_instances = self.driver.list_instances_detail() + vm_instances = dict((vm.name, vm) for vm in vm_instances) db_instances = self.db.instance_get_all_by_host(context, self.host) num_vm_instances = len(vm_instances) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e7d02cb5d..67736dea2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1079,8 +1079,8 @@ def instance_stop(context, instance_id): session.query(models.Instance).\ filter_by(id=instance_id).\ update({'host': None, - 'state': power_state.SHUTOFF, - 'state_description': 'stopped', + 'vm_state': vm_state.STOP, + 'task_state': None, 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py index 0bbe39def..07efbf90f 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py @@ -14,7 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
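This commit also repairs migration 037: as first committed, the file used Column and String in its module-level c_task_state definition without importing them, so merely importing the migration would raise a NameError. With the import fix below applied, the head of the migration effectively reads (assembled from the two hunks, not new code):

    from sqlalchemy import MetaData, Table, Column, String

    meta = MetaData()

    c_task_state = Column('task_state',
                          String(length=255, convert_unicode=False,
                                 assert_unicode=None, unicode_error=None,
                                 _warn_on_bytestring=False),
                          nullable=True)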
-from sqlalchemy import MetaData, Table +from sqlalchemy import MetaData, Table, Column, String meta = MetaData() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index f2a4680b0..d2987cacc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -193,8 +193,9 @@ class Instance(BASE, NovaBase): key_name = Column(String(255)) key_data = Column(Text) - state = Column(Integer) - state_description = Column(String(255)) + power_state = Column(Integer) + vm_state = Column(String(255)) + task_state = Column(String(255)) memory_mb = Column(Integer) vcpus = Column(Integer) @@ -232,17 +233,6 @@ class Instance(BASE, NovaBase): root_device_name = Column(String(255)) - # TODO(vish): see Ewan's email about state improvements, probably - # should be in a driver base class or some such - # vmstate_state = running, halted, suspended, paused - # power_state = what we have - # task_state = transitory and may trigger power state transition - - #@validates('state') - #def validate_state(self, key, state): - # assert(state in ['nostate', 'running', 'blocked', 'paused', - # 'shutdown', 'shutoff', 'crashed']) - class InstanceActions(BASE, NovaBase): """Represents a guest VM's actions and results""" diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index f28353f05..b788b996f 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -30,6 +30,8 @@ from nova import log as logging from nova import rpc from nova import utils from nova.compute import power_state +from nova.compute import task_state +from nova.compute import vm_state from nova.api.ec2 import ec2utils @@ -104,10 +106,8 @@ class Scheduler(object): dest, block_migration) # Changing instance_state. - db.instance_set_state(context, - instance_id, - power_state.PAUSED, - 'migrating') + values = {"vm_state": vm_state.MIGRATE} + db.instance_update(context, instance_id, values) # Changing volume state for volume_ref in instance_ref['volumes']: @@ -129,8 +129,7 @@ class Scheduler(object): """ # Checking instance is running. 
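With the model change above, an instance is now described along three axes instead of two: power_state (an integer, whatever the hypervisor last reported), vm_state (a string naming the intended lifecycle state), and task_state (a string naming the transition in progress, or None when the instance is settled). That is why the source-host check below can test power_state alone; 'running' is no longer folded into state_description. A rebooting instance, for example, would carry values along these lines (an illustration, not code from this patch):

    instance = {'power_state': power_state.RUNNING,
                'vm_state': vm_state.REBOOT,
                'task_state': task_state.REBOOTING}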
- if (power_state.RUNNING != instance_ref['state'] or \ - 'running' != instance_ref['state_description']): + if instance_ref['power_state'] != power_state.RUNNING: instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) raise exception.InstanceNotRunning(instance_id=instance_id) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 158df2a27..1b5e131c9 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -40,6 +40,7 @@ from nova.scheduler import driver from nova.scheduler import manager from nova.scheduler import multi from nova.compute import power_state +from nova.compute import vm_state FLAGS = flags.FLAGS @@ -94,6 +95,9 @@ class SchedulerTestCase(test.TestCase): inst['vcpus'] = kwargs.get('vcpus', 1) inst['memory_mb'] = kwargs.get('memory_mb', 10) inst['local_gb'] = kwargs.get('local_gb', 20) + inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) + inst['task_state'] = kwargs.get('task_state', None) return db.instance_create(ctxt, inst) def test_fallback(self): @@ -271,8 +275,9 @@ class SimpleDriverTestCase(test.TestCase): inst['memory_mb'] = kwargs.get('memory_mb', 20) inst['local_gb'] = kwargs.get('local_gb', 30) inst['launched_on'] = kwargs.get('launghed_on', 'dummy') - inst['state_description'] = kwargs.get('state_description', 'running') - inst['state'] = kwargs.get('state', power_state.RUNNING) + inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['task_state'] = kwargs.get('task_state', None) + inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) return db.instance_create(self.context, inst)['id'] def _create_volume(self): @@ -664,14 +669,14 @@ class SimpleDriverTestCase(test.TestCase): block_migration=False) i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['state_description'] == 'migrating') + self.assertTrue(i_ref['vm_state'] == vm_state.MIGRATE) db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) def test_live_migration_src_check_instance_not_running(self): """The instance given by instance_id is not running.""" - instance_id = self._create_instance(state_description='migrating') + instance_id = self._create_instance(power_state=power_state.NOSTATE) i_ref = db.instance_get(self.context, instance_id) try: diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index e2fa3b140..f310eaff6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -23,6 +23,7 @@ from nova import compute from nova.compute import instance_types from nova.compute import manager as compute_manager from nova.compute import power_state +from nova.compute import vm_state from nova import context from nova import db from nova.db.sqlalchemy import models @@ -747,8 +748,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + 'task_state': None, 'host': i_ref['host']}) for v in i_ref['volumes']: dbmock.volume_update(c, v['id'], {'status': 'in-use'}) @@ -779,8 +780,8 @@ class ComputeTestCase(test.TestCase): 'block_migration': False, 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) - dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', - 'state': 
power_state.RUNNING, + dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE, + 'task_state': None, 'host': i_ref['host']}) self.compute.db = dbmock @@ -825,8 +826,8 @@ class ComputeTestCase(test.TestCase): c = context.get_admin_context() instance_id = self._create_instance() i_ref = db.instance_get(c, instance_id) - db.instance_update(c, i_ref['id'], {'state_description': 'migrating', - 'state': power_state.PAUSED}) + db.instance_update(c, i_ref['id'], {'vm_state': vm_state.MIGRATE, + 'power_state': power_state.PAUSED}) v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id}) fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1', 'instance_id': instance_id}) @@ -887,7 +888,7 @@ class ComputeTestCase(test.TestCase): instances = db.instance_get_all(context.get_admin_context()) LOG.info(_("After force-killing instances: %s"), instances) self.assertEqual(len(instances), 1) - self.assertEqual(power_state.SHUTOFF, instances[0]['state']) + self.assertEqual(power_state.NOSTATE, instances[0]['power_state']) def test_get_all_by_name_regexp(self): """Test searching instances by name (display_name)""" @@ -1307,25 +1308,25 @@ class ComputeTestCase(test.TestCase): """Test searching instances by state""" c = context.get_admin_context() - instance_id1 = self._create_instance({'state': power_state.SHUTDOWN}) + instance_id1 = self._create_instance({'power_state': power_state.SHUTDOWN}) instance_id2 = self._create_instance({ 'id': 2, - 'state': power_state.RUNNING}) + 'power_state': power_state.RUNNING}) instance_id3 = self._create_instance({ 'id': 10, - 'state': power_state.RUNNING}) + 'power_state': power_state.RUNNING}) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SUSPENDED}) + search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.SHUTDOWN}) + search_opts={'power_state': power_state.SHUTDOWN}) self.assertEqual(len(instances), 1) self.assertEqual(instances[0].id, instance_id1) instances = self.compute_api.get_all(c, - search_opts={'state': power_state.RUNNING}) + search_opts={'power_state': power_state.RUNNING}) self.assertEqual(len(instances), 2) instance_ids = [instance.id for instance in instances] self.assertTrue(instance_id2 in instance_ids) @@ -1333,7 +1334,7 @@ class ComputeTestCase(test.TestCase): # Test passing a list as search arg instances = self.compute_api.get_all(c, - search_opts={'state': [power_state.SHUTDOWN, + search_opts={'power_state': [power_state.SHUTDOWN, power_state.RUNNING]}) self.assertEqual(len(instances), 3) -- cgit From 805c1cec609b39ee5a0ba1517bf2f1d41e0c4fa9 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 18 Aug 2011 15:05:35 -0700 Subject: allow specification of key pair/security group info via metadata extract metadata about keypair / security group configuration from server metadata sent on create. This allows users to use these extensions with their existing api implementations. 
Also remove the code that chooses the first key pair in the tenant - since
it seems to have been used during the development of os api
---
 nova/api/openstack/create_instance_helper.py |   21 ++++++++-------------
 nova/api/openstack/servers.py                |    1 -
 2 files changed, 8 insertions(+), 14 deletions(-)

diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index b4a08dac0..031b06921 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -73,20 +73,15 @@ class CreateInstanceHelper(object):
         if not 'server' in body:
             raise exc.HTTPUnprocessableEntity()
 
-        server_dict = body['server']
         context = req.environ['nova.context']
+        server_dict = body['server']
+        metadata = server_dict.get('metadata', {})
         password = self.controller._get_server_admin_password(server_dict)
 
-        key_name = None
-        key_data = None
-        # TODO(vish): Key pair access should move into a common library
-        # instead of being accessed directly from the db.
-        key_pairs = db.key_pair_get_all_by_user(context.elevated(),
-                                                context.user_id)
-        if key_pairs:
-            key_pair = key_pairs[0]
-            key_name = key_pair['name']
-            key_data = key_pair['public_key']
+        # NOTE(ja): extract key_name and security_group from metadata
+        # to use in os extensions for firewall & keypairs
+        key_name = metadata.get('key_name')
+        security_group = metadata.get('security_group')
 
         image_href = self.controller._image_ref_from_req_data(body)
         # If the image href was generated by nova api, strip image_href
@@ -155,8 +150,8 @@ class CreateInstanceHelper(object):
             display_name=name,
             display_description=name,
             key_name=key_name,
-            key_data=key_data,
-            metadata=server_dict.get('metadata', {}),
+            security_group=security_group,
+            metadata=metadata,
             injected_files=injected_files,
             admin_password=password,
             zone_blob=zone_blob,
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 335ecad86..2cf4e3eda 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -146,7 +146,6 @@ class Controller(object):
     def create(self, req, body):
         """ Creates a new server for a given user """
         extra_values = None
-        result = None
         extra_values, instances = self.helper.create_instance(
                             req, body, self.compute_api.create)
-- cgit

From bbe414cba5d389b553fb3122a3a7dbc9c6d898f2 Mon Sep 17 00:00:00 2001
From: Jesse Andrews
Date: Thu, 18 Aug 2011 16:26:32 -0700
Subject: follow same pattern as userdata (not metadata approach)

---
 nova/api/openstack/create_instance_helper.py |   27 +++++++++++++--------------
 nova/api/openstack/views/servers.py          |    3 +++
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index e64a076c8..c2e9e76ac 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -75,13 +75,15 @@ class CreateInstanceHelper(object):
 
         context = req.environ['nova.context']
         server_dict = body['server']
-        metadata = server_dict.get('metadata', {})
         password = self.controller._get_server_admin_password(server_dict)
 
-        # NOTE(ja): extract key_name and security_group from metadata
-        # to use in os extensions for firewall & keypairs
-        key_name = metadata.get('key_name')
-        security_group = metadata.get('security_group')
+        if not 'name' in server_dict:
+            msg = _("Server name is not defined")
+            raise exc.HTTPBadRequest(explanation=msg)
+
+        name = server_dict['name']
+        self._validate_server_name(name)
+        name = name.strip()
image_href = self.controller._image_ref_from_req_data(body) # If the image href was generated by nova api, strip image_href @@ -112,16 +114,13 @@ class CreateInstanceHelper(object): msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) - if not 'name' in server_dict: - msg = _("Server name is not defined") - raise exc.HTTPBadRequest(explanation=msg) - zone_blob = server_dict.get('blob') - user_data = server_dict.get('user_data') availability_zone = server_dict.get('availability_zone') - name = server_dict['name'] - self._validate_server_name(name) - name = name.strip() + + # optional openstack extensions: + key_name = server_dict.get('key_name') + security_group = server_dict.get('security_group') + user_data = server_dict.get('user_data') reservation_id = server_dict.get('reservation_id') min_count = server_dict.get('min_count') @@ -152,7 +151,7 @@ class CreateInstanceHelper(object): display_description=name, key_name=key_name, security_group=security_group, - metadata=metadata, + metadata=server_dict.get('metadata', {}), injected_files=injected_files, admin_password=password, zone_blob=zone_blob, diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index edc328129..ac2de0c57 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -78,6 +78,9 @@ class ViewBuilder(object): metadata[item['key']] = str(item['value']) inst_dict['metadata'] = metadata + inst_dict['key_name'] = inst.get('key_name') + inst_dict['security_group'] = inst.get('security_group') + inst_dict['hostId'] = '' if inst.get('host'): inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest() -- cgit From f4f0ce95946962be73cfc509b24fd000fc931198 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 18 Aug 2011 16:46:16 -0700 Subject: tests pass --- nova/api/openstack/views/servers.py | 4 ++-- nova/tests/api/openstack/test_servers.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index ac2de0c57..285228b30 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -78,8 +78,8 @@ class ViewBuilder(object): metadata[item['key']] = str(item['value']) inst_dict['metadata'] = metadata - inst_dict['key_name'] = inst.get('key_name') - inst_dict['security_group'] = inst.get('security_group') + inst_dict['key_name'] = inst.get('key_name', '') + inst_dict['security_group'] = inst.get('security_group', '') inst_dict['hostId'] = '' if inst.get('host'): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index a510d7d97..e374abb6b 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -335,6 +335,8 @@ class ServersTest(test.TestCase): "name": "server1", "status": "BUILD", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "10", "links": [ @@ -497,6 +499,8 @@ class ServersTest(test.TestCase): "name": "server1", "status": "ACTIVE", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "10", "links": [ @@ -588,6 +592,8 @@ class ServersTest(test.TestCase): "name": "server1", "status": "ACTIVE", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "10", "links": [ @@ -3094,6 +3100,8 @@ class ServersViewBuilderV11Test(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": 
"5", "links": [ @@ -3145,6 +3153,8 @@ class ServersViewBuilderV11Test(test.TestCase): "name": "test_server", "status": "ACTIVE", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "5", "links": [ @@ -3200,6 +3210,8 @@ class ServersViewBuilderV11Test(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": '', + "key_name": '', + "security_group": '', "image": { "id": "5", "links": [ @@ -3265,6 +3277,8 @@ class ServerXMLSerializationTest(test.TestCase): "name": "test_server", "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', + "key_name": '', + "security_group": '', "image": { "id": "5", "links": [ -- cgit From 203309693fc2dd648b9d4b211686228557728c89 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Thu, 18 Aug 2011 19:05:40 -0700 Subject: remove security groups, improve exception handling, add tests --- nova/api/openstack/create_instance_helper.py | 5 ++-- nova/api/openstack/views/servers.py | 1 - nova/tests/api/openstack/test_servers.py | 42 +++++++++++++++++++++++----- 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index c2e9e76ac..84d8c0c39 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -119,7 +119,6 @@ class CreateInstanceHelper(object): # optional openstack extensions: key_name = server_dict.get('key_name') - security_group = server_dict.get('security_group') user_data = server_dict.get('user_data') reservation_id = server_dict.get('reservation_id') @@ -150,7 +149,6 @@ class CreateInstanceHelper(object): display_name=name, display_description=name, key_name=key_name, - security_group=security_group, metadata=server_dict.get('metadata', {}), injected_files=injected_files, admin_password=password, @@ -168,6 +166,9 @@ class CreateInstanceHelper(object): except exception.FlavorNotFound as error: msg = _("Invalid flavorRef provided.") raise exc.HTTPBadRequest(explanation=msg) + except exception.KeypairNotFound as error: + msg = _("Invalid key_name provided.") + raise exc.HTTPBadRequest(explanation=msg) # Let the caller deal with unhandled exceptions. 
def _handle_quota_error(self, error): diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 285228b30..c0df5abe6 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -79,7 +79,6 @@ class ViewBuilder(object): inst_dict['metadata'] = metadata inst_dict['key_name'] = inst.get('key_name', '') - inst_dict['security_group'] = inst.get('security_group', '') inst_dict['hostId'] = '' if inst.get('host'): diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index e374abb6b..139820c09 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -336,7 +336,6 @@ class ServersTest(test.TestCase): "status": "BUILD", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "10", "links": [ @@ -500,7 +499,6 @@ class ServersTest(test.TestCase): "status": "ACTIVE", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "10", "links": [ @@ -593,7 +591,6 @@ class ServersTest(test.TestCase): "status": "ACTIVE", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "10", "links": [ @@ -1643,6 +1640,41 @@ class ServersTest(test.TestCase): self.assertEqual(expected_flavor, server['flavor']) self.assertEqual(expected_image, server['image']) + def test_create_instance_v1_1_invalid_key_name(self): + self._setup_for_create_instance() + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/flavors/3' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + key_name='nonexistentkey')) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_1_valid_key_name(self): + self._setup_for_create_instance() + + def key_pair_get(context, user_id, key_name): + return dict(name='mykey', public_key='public_key') + + self.stubs.Set(nova.db, 'key_pair_get', key_pair_get) + + image_href = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/flavors/3' + body = dict(server=dict( + name='server_test', imageRef=image_href, flavorRef=flavor_ref, + key_name='mykey')) + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + def test_create_instance_v1_1_invalid_flavor_href(self): self._setup_for_create_instance() @@ -3101,7 +3133,6 @@ class ServersViewBuilderV11Test(test.TestCase): "status": "BUILD", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ @@ -3154,7 +3185,6 @@ class ServersViewBuilderV11Test(test.TestCase): "status": "ACTIVE", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ @@ -3211,7 +3241,6 @@ class ServersViewBuilderV11Test(test.TestCase): "status": "BUILD", "hostId": '', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ @@ -3278,7 +3307,6 @@ class ServerXMLSerializationTest(test.TestCase): "status": "BUILD", "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0', "key_name": '', - "security_group": '', "image": { "id": "5", "links": [ -- cgit From 1ab7b11a439cef8b48621355467d5fb460d9b2e2 Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Thu, 18 Aug 2011 22:52:45 -0700 Subject: 
add key_name/data support to server stub --- nova/tests/api/openstack/test_servers.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 139820c09..605b3fee3 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -145,7 +145,7 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, public_addresses=None, host=None, power_state=0, reservation_id="", uuid=FAKE_UUID, image_ref="10", - flavor_id="1", interfaces=None, name=None): + flavor_id="1", interfaces=None, name=None, key_name=''): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -160,6 +160,11 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, if host is not None: host = str(host) + if key_name: + key_data = 'FAKE' + else: + key_data = '' + # ReservationID isn't sent back, hack it in there. server_name = name or "server%s" % id if reservation_id != "": @@ -176,8 +181,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "kernel_id": "", "ramdisk_id": "", "launch_index": 0, - "key_name": "", - "key_data": "", + "key_name": key_name, + "key_data": key_data, "state": power_state, "state_description": "", "memory_mb": 0, -- cgit From 9b96b24ec93864731b6fc5031d2eceb22398be24 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 19 Aug 2011 09:30:52 -0400 Subject: Bump migration number. --- .../versions/037_update_instance_states.py | 57 ---------------------- .../versions/039_update_instance_states.py | 57 ++++++++++++++++++++++ 2 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py deleted file mode 100644 index 07efbf90f..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_update_instance_states.py +++ /dev/null @@ -1,57 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, String - -meta = MetaData() - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -def upgrade(migrate_engine): - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state - c_state.alter(name='power_state') - - c_vm_state = instances.c.state_description - c_vm_state.alter(name='vm_state') - - instances.create_column(c_task_state) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.power_state - c_state.alter(name='state') - - c_vm_state = instances.c.vm_state - c_vm_state.alter(name='state_description') - - instances.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py new file mode 100644 index 000000000..07efbf90f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From 3bd386cdba53f6a54a29e510c0f9eecf9b9ea7d9 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Fri, 19 Aug 2011 15:13:40 -0400 Subject: vm_state --> vm_states --- nova/api/ec2/cloud.py | 24 ++++++++-------- nova/compute/api.py | 46 +++++++++++++++---------------- nova/compute/manager.py | 50 +++++++++++++++++----------------- nova/compute/vm_state.py | 31 --------------------- nova/compute/vm_states.py | 31 +++++++++++++++++++++ nova/scheduler/driver.py | 4 +-- nova/tests/scheduler/test_scheduler.py | 2 +- nova/tests/test_compute.py | 8 +++--- 8 files changed, 98 insertions(+), 98 deletions(-) delete mode 100644 nova/compute/vm_state.py create mode 100644 nova/compute/vm_states.py diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 9aebf92e3..4b69cc272 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -47,6 +47,7 @@ from nova import utils from nova import volume from nova.api.ec2 import ec2utils from nova.compute import instance_types +from nova.compute import vm_states from nova.image import s3 @@ -1039,11 +1040,10 @@ class CloudController(object): def _format_attr_instance_initiated_shutdown_behavior(instance, result): - state_description = instance['state_description'] - state_to_value = {'stopping': 'stop', - 'stopped': 'stop', - 'terminating': 'terminate'} - value = state_to_value.get(state_description) + vm_state = instance['vm_state'] + state_to_value = {vm_states.STOP: 'stop', + vm_states.DELETE: 'terminate'} + value = state_to_value.get(vm_state) if value: result['instanceInitiatedShutdownBehavior'] = value @@ -1198,8 +1198,8 @@ class CloudController(object): self._format_kernel_id(instance, i, 'kernelId') self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { - 'code': instance['state'], - 'name': instance['state_description']} + 'code': instance['power_state'], + 'name': instance['vm_state']} #FIXME fixed_addr = None floating_addr = None if instance['fixed_ips']: @@ -1618,22 +1618,22 @@ class CloudController(object): # stop the instance if necessary restart_instance = False if not no_reboot: - state_description = instance['state_description'] + vm_state = instance['vm_state'] # if the instance is in subtle state, refuse to proceed. 
- if state_description not in ('running', 'stopping', 'stopped'): + if vm_state not in (vm_states.ACTIVE, vm_states.STOP): raise exception.InstanceNotRunning(instance_id=ec2_instance_id) - if state_description == 'running': + if vm_state == vm_states.ACTIVE: restart_instance = True self.compute_api.stop(context, instance_id=instance_id) # wait instance for really stopped start_time = time.time() - while state_description != 'stopped': + while vm_state != vm_states.STOP: time.sleep(1) instance = self.compute_api.get(context, instance_id) - state_description = instance['state_description'] + vm_state = instance['vm_state'] # NOTE(yamahata): timeout and error. 1 hour for now for safety. # Is it too short/long? # Or is there any better way? diff --git a/nova/compute/api.py b/nova/compute/api.py index 77397f90e..8634ad27a 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,7 +36,7 @@ from nova import utils from nova import volume from nova.compute import instance_types from nova.compute import power_state -from nova.compute import vm_state +from nova.compute import vm_states from nova.compute.utils import terminate_volumes from nova.scheduler import api as scheduler_api from nova.db import base @@ -76,10 +76,10 @@ def generate_default_hostname(instance): def _is_able_to_shutdown(instance, instance_id): states = { - vm_state.DELETE: "Instance %s is already being terminated", - vm_state.MIGRATE: "Instance %s is being migrated", - vm_state.RESIZE: "Instance %s is being resized", - vm_state.STOP: "Instance %s is being stopped", + vm_states.DELETE: "Instance %s is already being terminated", + vm_states.MIGRATE: "Instance %s is being migrated", + vm_states.RESIZE: "Instance %s is being resized", + vm_states.STOP: "Instance %s is being stopped", } msg = states.get(instance['vm_state']) if msg: @@ -236,7 +236,7 @@ class API(base.Base): 'kernel_id': kernel_id or '', 'ramdisk_id': ramdisk_id or '', 'power_state': power_state.NOSTATE, - 'vm_state': vm_state.BUILD, + 'vm_state': vm_states.BUILD, 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), @@ -653,7 +653,7 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_state.DELETE, + vm_state=vm_states.DELETE, terminated_at=utils.utcnow()) host = instance['host'] @@ -675,7 +675,7 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_state.STOP, + vm_state=vm_states.STOP, terminated_at=utils.utcnow()) host = instance['host'] @@ -689,12 +689,12 @@ class API(base.Base): instance = self._get_instance(context, instance_id, 'starting') vm_state = instance["vm_state"] - if vm_state != vm_state.STOP: + if vm_state != vm_states.STOP: LOG.warning(_("Instance %(instance_id)s is not " "stopped. (%(vm_state)s)") % locals()) return - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) # TODO(yamahata): injected_files isn't supported right now. # It is used only for osapi. not for ec2 api. 
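
An aside on the start() hunk just above: the rename is more than cosmetic
there. The local assignment vm_state = instance["vm_state"] shadowed the old
vm_state module, so the removed comparison vm_state != vm_state.STOP would
have raised an AttributeError (looking up STOP on a string) instead of
checking the state. A self-contained illustration of the hazard, in plain
Python rather than nova code:

class vm_state(object):
    """Stand-in for the old nova.compute.vm_state module."""
    STOP = 'stop'


def start(instance):
    vm_state = instance['vm_state']   # local string now shadows the module
    return vm_state != vm_state.STOP  # AttributeError: str has no 'STOP'


try:
    start({'vm_state': 'stop'})
except AttributeError as error:
    print(error)  # the plural module name (vm_states) cannot collide here
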
@@ -923,7 +923,7 @@ class API(base.Base): @scheduler_api.reroute_compute("reboot") def reboot(self, context, instance_id): """Reboot the given instance.""" - self.update(context, instance_id, vm_state=vm_state.REBOOT) + self.update(context, instance_id, vm_state=vm_states.REBOOT) self._cast_compute_message('reboot_instance', context, instance_id) @scheduler_api.reroute_compute("rebuild") @@ -932,8 +932,8 @@ class API(base.Base): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) invalid_rebuild_states = [ - vm_state.BUILD, - vm_state.REBUILD, + vm_states.BUILD, + vm_states.REBUILD, ] if instance["vm_state"] in invalid_rebuild_states: @@ -956,7 +956,7 @@ class API(base.Base): "injected_files": files_to_inject, } - self.update(context, instance_id, vm_state=vm_state.REBUILD) + self.update(context, instance_id, vm_state=vm_states.REBUILD) self._cast_compute_message('rebuild_instance', context, @@ -975,7 +975,7 @@ class API(base.Base): raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, @@ -998,7 +998,7 @@ class API(base.Base): raise exception.MigrationNotFoundByStatus(instance_id=instance_id, status='finished') - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, @@ -1045,7 +1045,7 @@ class API(base.Base): if (current_memory_mb == new_memory_mb) and flavor_id: raise exception.CannotResizeToSameSize() - self.update(context, instance_id, vm_state=vm_state.RESIZE) + self.update(context, instance_id, vm_state=vm_states.RESIZE) instance_ref = self._get_instance(context, instance_id, 'resize') self._cast_scheduler_message(context, @@ -1080,13 +1080,13 @@ class API(base.Base): @scheduler_api.reroute_compute("pause") def pause(self, context, instance_id): """Pause the given instance.""" - self.update(context, instance_id, vm_state=vm_state.PAUSE) + self.update(context, instance_id, vm_state=vm_states.PAUSE) self._cast_compute_message('pause_instance', context, instance_id) @scheduler_api.reroute_compute("unpause") def unpause(self, context, instance_id): """Unpause the given instance.""" - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) self._cast_compute_message('unpause_instance', context, instance_id) def _call_compute_message_for_host(self, action, context, host, params): @@ -1119,25 +1119,25 @@ class API(base.Base): @scheduler_api.reroute_compute("suspend") def suspend(self, context, instance_id): """Suspend the given instance.""" - self.update(context, instance_id, vm_state=vm_state.SUSPEND) + self.update(context, instance_id, vm_state=vm_states.SUSPEND) self._cast_compute_message('suspend_instance', context, instance_id) @scheduler_api.reroute_compute("resume") def resume(self, context, instance_id): """Resume the given instance.""" - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") def rescue(self, context, instance_id): """Rescue the given instance.""" 
- self.update(context, instance_id, vm_state=vm_state.RESCUE) + self.update(context, instance_id, vm_state=vm_states.RESCUE) self._cast_compute_message('rescue_instance', context, instance_id) @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): """Unrescue the given instance.""" - self.update(context, instance_id, vm_state=vm_state.ACTIVE) + self.update(context, instance_id, vm_state=vm_states.ACTIVE) self._cast_compute_message('unrescue_instance', context, instance_id) @scheduler_api.reroute_compute("set_admin_password") diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0d7f3ad71..4be5bdd69 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -57,7 +57,7 @@ from nova import utils from nova import volume from nova.compute import power_state from nova.compute import task_state -from nova.compute import vm_state +from nova.compute import vm_states from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -372,7 +372,7 @@ class ComputeManager(manager.SchedulerDependentManager): updates = {} updates['host'] = self.host updates['launched_on'] = self.host - updates['vm_state'] = vm_state.BUILD + updates['vm_state'] = vm_states.BUILD updates['task_state'] = task_state.NETWORKING instance = self.db.instance_update(context, instance_id, updates) instance['injected_files'] = kwargs.get('injected_files', []) @@ -397,7 +397,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.BUILD, + vm_state=vm_states.BUILD, task_state=task_state.BLOCK_DEVICE_MAPPING) (swap, ephemerals, @@ -428,7 +428,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None, launched_at=utils.utcnow()) @@ -523,7 +523,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.REBUILD, + vm_state=vm_states.REBUILD, task_state=task_state.REBUILDING) network_info = self._get_instance_nw_info(context, instance_ref) @@ -531,7 +531,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.REBUILD, + vm_state=vm_states.REBUILD, task_state=task_state.BLOCK_DEVICE_MAPPING) bd_mapping = self._setup_block_device_mapping(context, instance_id) @@ -544,7 +544,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.REBUILD, + vm_state=vm_states.REBUILD, task_state=task_state.SPAWN) self.driver.spawn(context, instance_ref, network_info, bd_mapping) @@ -553,7 +553,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None, image_ref=image_ref, launched_at=utils.utcnow()) @@ -577,7 +577,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.REBOOT, + vm_state=vm_states.REBOOT, task_state=task_state.REBOOTING) if instance_ref['power_state'] != power_state.RUNNING: @@ -595,7 +595,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, 
power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -622,7 +622,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=image_type) LOG.audit(_('instance %s: snapshotting'), instance_id, @@ -787,7 +787,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.RESCUE, + vm_state=vm_states.RESCUE, task_state=task_state.RESCUING) instance_ref = self.db.instance_get(context, instance_id) @@ -799,7 +799,7 @@ class ComputeManager(manager.SchedulerDependentManager): current_power_state = self._get_power_state(context, instance_ref) self._instance_update(context, instance_id, - vm_state=vm_state.RESCUE, + vm_state=vm_states.RESCUE, task_state=task_state.RESCUED, power_state=current_power_state) @@ -812,7 +812,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=task_state.UNRESCUING) instance_ref = self.db.instance_get(context, instance_id) @@ -824,7 +824,7 @@ class ComputeManager(manager.SchedulerDependentManager): current_power_state = self._get_power_state(context, instance_ref) self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None, power_state=current_power_state) @@ -1048,7 +1048,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.PAUSE, + vm_state=vm_states.PAUSE, task_state=task_state.PAUSING) instance_ref = self.db.instance_get(context, instance_id) @@ -1058,7 +1058,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.PAUSE, + vm_state=vm_states.PAUSE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1070,7 +1070,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=task_state.UNPAUSING) instance_ref = self.db.instance_get(context, instance_id) @@ -1080,7 +1080,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1111,7 +1111,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.SUSPEND, + vm_state=vm_states.SUSPEND, task_state=task_state.SUSPENDING) instance_ref = self.db.instance_get(context, instance_id) @@ -1121,7 +1121,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.SUSPEND, + vm_state=vm_states.SUSPEND, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1133,7 +1133,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=task_state.RESUMING) 
instance_ref = self.db.instance_get(context, instance_id) @@ -1143,7 +1143,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1560,7 +1560,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref["id"], host=dest, power_state=current_power_state, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) # Restore volume state @@ -1611,7 +1611,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_ref['id'], host=host, - vm_state=vm_state.ACTIVE, + vm_state=vm_states.ACTIVE, task_state=None) for volume_ref in instance_ref['volumes']: diff --git a/nova/compute/vm_state.py b/nova/compute/vm_state.py deleted file mode 100644 index a1bca6ef4..000000000 --- a/nova/compute/vm_state.py +++ /dev/null @@ -1,31 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Possible vm states for instances""" - -ACTIVE='active' -BUILD='build' -REBUILD='rebuild' -REBOOT='reboot' -DELETE='delete' -STOP='stop' -MIGRATE='migrate' -RESIZE='resize' -VERIFY_RESIZE='verify_resize' -PAUSE='pause' -SUSPEND='suspend' -RESCUE='rescue' diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py new file mode 100644 index 000000000..a1bca6ef4 --- /dev/null +++ b/nova/compute/vm_states.py @@ -0,0 +1,31 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Possible vm states for instances""" + +ACTIVE='active' +BUILD='build' +REBUILD='rebuild' +REBOOT='reboot' +DELETE='delete' +STOP='stop' +MIGRATE='migrate' +RESIZE='resize' +VERIFY_RESIZE='verify_resize' +PAUSE='pause' +SUSPEND='suspend' +RESCUE='rescue' diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index b788b996f..8f9be879b 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -31,7 +31,7 @@ from nova import rpc from nova import utils from nova.compute import power_state from nova.compute import task_state -from nova.compute import vm_state +from nova.compute import vm_states from nova.api.ec2 import ec2utils @@ -106,7 +106,7 @@ class Scheduler(object): dest, block_migration) # Changing instance_state. 
-        values = {"vm_state": vm_state.MIGRATE}
+        values = {"vm_state": vm_states.MIGRATE}
         db.instance_update(context, instance_id, values)
 
         # Changing volume state
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 1b5e131c9..629019eaf 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -40,7 +40,7 @@ from nova.scheduler import driver
 from nova.scheduler import manager
 from nova.scheduler import multi
 from nova.compute import power_state
-from nova.compute import vm_state
+from nova.compute import vm_states
 
 FLAGS = flags.FLAGS
 
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 188398924..ca1bbc69f 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -23,7 +23,7 @@ from nova import compute
 from nova.compute import instance_types
 from nova.compute import manager as compute_manager
 from nova.compute import power_state
-from nova.compute import vm_state
+from nova.compute import vm_states
 from nova import context
 from nova import db
 from nova.db.sqlalchemy import models
@@ -748,7 +748,7 @@ class ComputeTestCase(test.TestCase):
                           'block_migration': False,
                           'disk': None}}).\
                           AndRaise(rpc.RemoteError('', '', ''))
-        dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE,
+        dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
                                                 'task_state': None,
                                                 'host': i_ref['host']})
         for v in i_ref['volumes']:
@@ -780,7 +780,7 @@ class ComputeTestCase(test.TestCase):
                           'block_migration': False,
                           'disk': None}}).\
                           AndRaise(rpc.RemoteError('', '', ''))
-        dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_state.ACTIVE,
+        dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
                                                 'task_state': None,
                                                 'host': i_ref['host']})
 
@@ -826,7 +826,7 @@ class ComputeTestCase(test.TestCase):
         c = context.get_admin_context()
         instance_id = self._create_instance()
         i_ref = db.instance_get(c, instance_id)
-        db.instance_update(c, i_ref['id'], {'vm_state': vm_state.MIGRATE,
+        db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATE,
                                             'power_state': power_state.PAUSED})
         v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
         fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
-- cgit

From c4e77b67a74cb0828bb9a7ccbedcaa1baeb6188d Mon Sep 17 00:00:00 2001
From: Brian Lamar
Date: Fri, 19 Aug 2011 18:34:34 -0400
Subject: Lots of modifications surrounding the OSAPI to remove any mention of
 power states and to use the vm_states and task_state modules exclusively.

Currently there are still a number of tests failing, but this is a stopping
place for today.
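
The diff below swaps the OSAPI's power_state-keyed _STATUS_MAP for a table
keyed on vm_state, consulting task_state only for the compound PASSWORD
case. A usage sketch of the two helpers it introduces (this assumes the
task_state module defines a PASSWORD constant, which the new code references
but this series never shows):

from nova.api.openstack import common
from nova.compute import task_state
from nova.compute import vm_states

common.status_from_state(vm_states.BUILD)      # 'BUILD'
common.status_from_state(vm_states.STOP)       # 'STOPPED'
common.status_from_state(vm_states.ACTIVE,
                         task_state.PASSWORD)  # 'PASSWORD'

# The reverse mapping turns ?status= filters from the API into vm_state
# search options for compute:
common.vm_state_from_status('verify_resize')   # vm_states.VERIFY_RESIZE
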
--- nova/api/openstack/common.py | 52 ++++++------ nova/api/openstack/servers.py | 12 ++- nova/api/openstack/views/servers.py | 5 +- nova/compute/vm_states.py | 2 + nova/tests/api/openstack/test_server_actions.py | 28 ++++--- nova/tests/api/openstack/test_servers.py | 102 +++++++++++++++--------- nova/tests/integrated/test_servers.py | 27 ++++--- nova/tests/vmwareapi/db_fakes.py | 5 +- 8 files changed, 138 insertions(+), 95 deletions(-) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index d9eb832f2..eae0fd916 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -27,7 +27,8 @@ from nova import flags from nova import log as logging from nova import quota from nova.api.openstack import wsgi -from nova.compute import power_state as compute_power_state +from nova.compute import vm_states +from nova.compute import task_state LOG = logging.getLogger('nova.api.openstack.common') @@ -38,36 +39,35 @@ XML_NS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0' XML_NS_V11 = 'http://docs.openstack.org/compute/api/v1.1' -_STATUS_MAP = { - None: 'BUILD', - compute_power_state.NOSTATE: 'BUILD', - compute_power_state.RUNNING: 'ACTIVE', - compute_power_state.BLOCKED: 'ACTIVE', - compute_power_state.SUSPENDED: 'SUSPENDED', - compute_power_state.PAUSED: 'PAUSED', - compute_power_state.SHUTDOWN: 'SHUTDOWN', - compute_power_state.SHUTOFF: 'SHUTOFF', - compute_power_state.CRASHED: 'ERROR', - compute_power_state.FAILED: 'ERROR', - compute_power_state.BUILDING: 'BUILD', +_STATE_MAP = { + vm_states.ACTIVE: 'ACTIVE', + vm_states.BUILD: 'BUILD', + vm_states.REBUILD: 'REBUILD', + vm_states.REBOOT: 'REBOOT', + vm_states.HARD_REBOOT: 'HARD_REBOOT', + vm_states.STOP: 'STOPPED', + vm_states.MIGRATE: 'MIGRATING', + vm_states.RESIZE: 'RESIZE', + vm_states.VERIFY_RESIZE: 'VERIFY_RESIZE', + vm_states.PAUSE: 'PAUSED', + vm_states.SUSPEND: 'SUSPENDED', + vm_states.RESCUE: 'RESCUE', + vm_states.ERROR: 'ERROR', } -def status_from_power_state(power_state): - """Map the power state to the server status string""" - return _STATUS_MAP[power_state] +def status_from_state(_vm_state, _task_state=None): + """Given vm_state and task_state, return a status string.""" + if _vm_state == vm_states.ACTIVE and _task_state == task_state.PASSWORD: + return "PASSWORD" + return _STATE_MAP.get(_vm_state, "UNKNOWN_STATE") -def power_states_from_status(status): - """Map the server status string to a list of power states""" - power_states = [] - for power_state, status_map in _STATUS_MAP.iteritems(): - # Skip the 'None' state - if power_state is None: - continue - if status.lower() == status_map.lower(): - power_states.append(power_state) - return power_states +def vm_state_from_status(status): + """Map the server status string to a vm state.""" + for state, status_string in _STATE_MAP.iteritems(): + if status.lower() == status_string.lower(): + return state def get_pagination_params(request): diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 41e63ec3c..0a451caee 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -95,17 +95,15 @@ class Controller(object): search_opts['recurse_zones'] = utils.bool_from_str( search_opts.get('recurse_zones', False)) - # If search by 'status', we need to convert it to 'state' - # If the status is unknown, bail. - # Leave 'state' in search_opts so compute can pass it on to - # child zones.. + # If search by 'status', we need to convert it to 'vm_state' + # to pass on to child zones. 
if 'status' in search_opts: status = search_opts['status'] - search_opts['state'] = common.power_states_from_status(status) - if len(search_opts['state']) == 0: + state = common.vm_state_from_status(status) + if state is None: reason = _('Invalid server status: %(status)s') % locals() - LOG.error(reason) raise exception.InvalidInput(reason=reason) + search_opts['vm_state'] = state # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index edc328129..e9a932b0e 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -27,6 +27,7 @@ from nova.api.openstack.views import addresses as addresses_view from nova.api.openstack.views import flavors as flavors_view from nova.api.openstack.views import images as images_view from nova import utils +from nova.compute import vm_states class ViewBuilder(object): @@ -60,11 +61,13 @@ class ViewBuilder(object): def _build_detail(self, inst): """Returns a detailed model of a server.""" + vm_state = inst.get('vm_state', vm_states.BUILD) + task_state = inst.get('task_state') inst_dict = { 'id': inst['id'], 'name': inst['display_name'], - 'status': common.status_from_power_state(inst.get('state'))} + 'status': common.status_from_state(vm_state, task_state)} ctxt = nova.context.get_admin_context() compute_api = nova.compute.API() diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py index a1bca6ef4..d3d168001 100644 --- a/nova/compute/vm_states.py +++ b/nova/compute/vm_states.py @@ -21,6 +21,7 @@ ACTIVE='active' BUILD='build' REBUILD='rebuild' REBOOT='reboot' +HARD_REBOOT='hard_reboot' DELETE='delete' STOP='stop' MIGRATE='migrate' @@ -29,3 +30,4 @@ VERIFY_RESIZE='verify_resize' PAUSE='pause' SUSPEND='suspend' RESCUE='rescue' +ERROR='error' diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 80a27e30f..6f8be0f47 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -12,7 +12,8 @@ from nova import utils from nova import flags from nova.api.openstack import create_instance_helper from nova.compute import instance_types -from nova.compute import power_state +from nova.compute import vm_states +from nova.compute import task_state import nova.db.api from nova import test from nova.tests.api.openstack import common @@ -30,17 +31,18 @@ def instance_update(context, instance_id, kwargs): return _get_instance() -def return_server_with_power_state(power_state): +def return_server_with_state(vm_state, task_state=None): def _return_server(context, id): instance = _get_instance() - instance['state'] = power_state + instance['vm_state'] = vm_state + instance['task_state'] = task_state return instance return _return_server -def return_server_with_uuid_and_power_state(power_state): +def return_server_with_uuid_and_state(vm_state, task_state=None): def _return_server(context, id): - return return_server_with_power_state(power_state) + return return_server_with_state(vm_state, task_state) return _return_server @@ -68,8 +70,8 @@ def _get_instance(): "launch_index": 0, "key_name": "", "key_data": "", - "state": 0, - "state_description": "", + "vm_state": vm_states.ACTIVE, + "task_state": None, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -164,11 +166,11 @@ class ServerActionsTest(test.TestCase): }, } - state = power_state.BUILDING - 
new_return_server = return_server_with_power_state(state) + state = vm_states.BUILD + new_return_server = return_server_with_state(state) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) + return_server_with_uuid_and_state(state)) req = webob.Request.blank('/v1.0/servers/1/action') req.method = 'POST' @@ -627,11 +629,11 @@ class ServerActionsTestV11(test.TestCase): }, } - state = power_state.BUILDING - new_return_server = return_server_with_power_state(state) + state = vm_states.BUILD + new_return_server = return_server_with_state(state) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) self.stubs.Set(nova.db, 'instance_get_by_uuid', - return_server_with_uuid_and_power_state(state)) + return_server_with_uuid_and_state(state)) req = webob.Request.blank('/v1.1/servers/1/action') req.method = 'POST' diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 437620854..b500c514e 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -34,7 +34,8 @@ from nova.api.openstack import servers from nova.api.openstack import wsgi import nova.compute.api from nova.compute import instance_types -from nova.compute import power_state +from nova.compute import task_state +from nova.compute import vm_states import nova.db.api import nova.scheduler.api from nova.db.sqlalchemy.models import Instance @@ -86,15 +87,18 @@ def return_server_with_addresses(private, public): return _return_server -def return_server_with_power_state(power_state): +def return_server_with_state(vm_state, task_state=None): def _return_server(context, id): - return stub_instance(id, power_state=power_state) + return stub_instance(id, vm_state=vm_state, task_state=task_state) return _return_server -def return_server_with_uuid_and_power_state(power_state): +def return_server_with_uuid_and_state(vm_state, task_state): def _return_server(context, id): - return stub_instance(id, uuid=FAKE_UUID, power_state=power_state) + return stub_instance(id, + uuid=FAKE_UUID, + vm_state=vm_state, + task_state=task_state) return _return_server @@ -143,7 +147,8 @@ def instance_addresses(context, instance_id): def stub_instance(id, user_id='fake', project_id='fake', private_address=None, - public_addresses=None, host=None, power_state=0, + public_addresses=None, host=None, + vm_state=None, task_state=None, reservation_id="", uuid=FAKE_UUID, image_ref="10", flavor_id="1", interfaces=None, name=None): metadata = [] @@ -178,8 +183,8 @@ def stub_instance(id, user_id='fake', project_id='fake', private_address=None, "launch_index": 0, "key_name": "", "key_data": "", - "state": power_state, - "state_description": "", + "vm_state": vm_state or vm_states.BUILD, + "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -481,7 +486,7 @@ class ServersTest(test.TestCase): }, ] new_return_server = return_server_with_attributes( - interfaces=interfaces, power_state=1) + interfaces=interfaces, vm_state=vm_states.ACTIVE) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) req = webob.Request.blank('/v1.1/servers/1') @@ -571,8 +576,8 @@ class ServersTest(test.TestCase): }, ] new_return_server = return_server_with_attributes( - interfaces=interfaces, power_state=1, image_ref=image_ref, - flavor_id=flavor_id) + interfaces=interfaces, vm_state=vm_states.ACTIVE, + image_ref=image_ref, flavor_id=flavor_id) self.stubs.Set(nova.db.api, 
'instance_get', new_return_server) req = webob.Request.blank('/v1.1/servers/1') @@ -1169,9 +1174,8 @@ class ServersTest(test.TestCase): def test_get_servers_allows_status_v1_1(self): def fake_get_all(compute_self, context, search_opts=None): self.assertNotEqual(search_opts, None) - self.assertTrue('state' in search_opts) - self.assertEqual(set(search_opts['state']), - set([power_state.RUNNING, power_state.BLOCKED])) + self.assertTrue('vm_state' in search_opts) + self.assertEqual(search_opts['vm_state'], vm_states.ACTIVE) return [stub_instance(100)] self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) @@ -1188,13 +1192,9 @@ class ServersTest(test.TestCase): def test_get_servers_invalid_status_v1_1(self): """Test getting servers by invalid status""" - self.flags(allow_admin_api=False) - req = webob.Request.blank('/v1.1/servers?status=running') res = req.get_response(fakes.wsgi_app()) - # The following assert will fail if either of the asserts in - # fake_get_all() fail self.assertEqual(res.status_int, 400) self.assertTrue(res.body.find('Invalid server status') > -1) @@ -1632,6 +1632,7 @@ class ServersTest(test.TestCase): server = json.loads(res.body)['server'] self.assertEqual(16, len(server['adminPass'])) self.assertEqual(1, server['id']) + self.assertEqual("BUILD", server["status"]) self.assertEqual(0, server['progress']) self.assertEqual('server_test', server['name']) self.assertEqual(expected_flavor, server['flavor']) @@ -2165,23 +2166,52 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 204) self.assertEqual(self.server_delete_called, True) - def test_shutdown_status(self): - new_server = return_server_with_power_state(power_state.SHUTDOWN) - self.stubs.Set(nova.db.api, 'instance_get', new_server) - req = webob.Request.blank('/v1.0/servers/1') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['status'], 'SHUTDOWN') - def test_shutoff_status(self): - new_server = return_server_with_power_state(power_state.SHUTOFF) +class TestServerStatus(test.TestCase): + + def _get_with_state(self, vm_state, task_state=None): + new_server = return_server_with_state(vm_state, task_state) self.stubs.Set(nova.db.api, 'instance_get', new_server) - req = webob.Request.blank('/v1.0/servers/1') - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - res_dict = json.loads(res.body) - self.assertEqual(res_dict['server']['status'], 'SHUTOFF') + request = webob.Request.blank('/v1.0/servers/1') + response = request.get_response(fakes.wsgi_app()) + self.assertEqual(response.status_int, 200) + return json.loads(response.body) + + def test_active(self): + response = self._get_with_state(vm_states.ACTIVE) + self.assertEqual(response['server']['status'], 'ACTIVE') + + def test_reboot(self): + response = self._get_with_state(vm_states.REBOOT) + self.assertEqual(response['server']['status'], 'REBOOT') + + def test_hard_reboot(self): + response = self._get_with_state(vm_states.HARD_REBOOT) + self.assertEqual(response['server']['status'], 'HARD_REBOOT') + + def test_rebuild(self): + response = self._get_with_state(vm_states.REBUILD) + self.assertEqual(response['server']['status'], 'REBUILD') + + def test_rebuild_error(self): + response = self._get_with_state(vm_states.ERROR) + self.assertEqual(response['server']['status'], 'ERROR') + + def test_resize(self): + response = self._get_with_state(vm_states.RESIZE) + self.assertEqual(response['server']['status'], 'RESIZE') 
+ + def test_verify_resize(self): + response = self._get_with_state(vm_states.VERIFY_RESIZE) + self.assertEqual(response['server']['status'], 'VERIFY_RESIZE') + + def test_password_update(self): + response = self._get_with_state(vm_states.ACTIVE, task_state.PASSWORD) + self.assertEqual(response['server']['status'], 'PASSWORD') + + def test_stopped(self): + response = self._get_with_state(vm_states.STOP) + self.assertEqual(response['server']['status'], 'STOPPED') class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase): @@ -3018,8 +3048,8 @@ class ServersViewBuilderV11Test(test.TestCase): "launch_index": 0, "key_name": "", "key_data": "", - "state": 0, - "state_description": "", + "vm_state": vm_states.BUILD, + "task_state": None, "memory_mb": 0, "vcpus": 0, "local_gb": 0, @@ -3132,7 +3162,7 @@ class ServersViewBuilderV11Test(test.TestCase): def test_build_server_detail_active_status(self): #set the power state of the instance to running - self.instance['state'] = 1 + self.instance['vm_state'] = vm_states.ACTIVE image_bookmark = "http://localhost/images/5" flavor_bookmark = "http://localhost/flavors/1" expected_server = { diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 725f6d529..deeb3d008 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -28,6 +28,17 @@ LOG = logging.getLogger('nova.tests.integrated') class ServersTest(integrated_helpers._IntegratedTestBase): + def _wait_for_creation(self, server): + retries = 0 + while server['status'] == 'BUILD': + time.sleep(1) + server = self.api.get_server(server['id']) + print server + retries = retries + 1 + if retries > 8: + break + return server + def test_get_servers(self): """Simple check that listing servers works.""" servers = self.api.get_servers() @@ -36,7 +47,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_delete_server(self): """Creates and deletes a server.""" - + self.flags(stub_network=True) # Create server # Build the server data gradually, checking errors along the way @@ -91,19 +102,11 @@ class ServersTest(integrated_helpers._IntegratedTestBase): server_ids = [server['id'] for server in servers] self.assertTrue(created_server_id in server_ids) - # Wait (briefly) for creation - retries = 0 - while found_server['status'] == 'build': - LOG.debug("found server: %s" % found_server) - time.sleep(1) - found_server = self.api.get_server(created_server_id) - retries = retries + 1 - if retries > 5: - break + found_server = self._wait_for_creation(found_server) # It should be available... # TODO(justinsb): Mock doesn't yet do this... 
- #self.assertEqual('available', found_server['status']) + self.assertEqual('ACTIVE', found_server['status']) servers = self.api.get_servers(detail=True) for server in servers: self.assertTrue("image" in server) @@ -190,6 +193,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = self._wait_for_creation(created_server) + # rebuild the server with metadata post = {} post['rebuild'] = { diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index afd672c7a..dd38420ce 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -23,6 +23,8 @@ import time from nova import db from nova import utils +from nova.compute import task_state +from nova.compute import vm_states def stub_out_db_instance_api(stubs): @@ -64,7 +66,8 @@ def stub_out_db_instance_api(stubs): 'image_ref': values['image_ref'], 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], - 'state_description': 'scheduling', + 'vm_state': vm_states.BUILD, + 'task_state': task_state.SCHEDULE, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), -- cgit From f82d2d309a0f826522854fe331d1c53b8c6d6879 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 09:54:33 -0400 Subject: Ec2 API updates. --- nova/api/ec2/cloud.py | 25 ++++++++++++++++++++++++- nova/tests/test_cloud.py | 8 ++++---- nova/virt/fake.py | 12 ++++++------ 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 4b69cc272..b7c7d2e12 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -79,6 +79,29 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} +# EC2 API: Valid Values: +# pending | running | shutting-down | terminated | stopping | stopped +_STATE_DESCRIPTION_MAP = { + vm_states.ACTIVE: 'running', + vm_states.BUILD: 'pending', + vm_states.REBUILD: 'pending', + vm_states.REBOOT: 'reboot', + vm_states.DELETE: 'terminated', + vm_states.STOP: 'stopped', + vm_states.MIGRATE: 'migrate', + vm_states.RESIZE: 'resize', + vm_states.VERIFY_RESIZE: 'verify_resize', + vm_states.PAUSE: 'pause', + vm_states.SUSPEND: 'suspend', + vm_states.RESCUE: 'rescue' +} + + +def state_description_from_vm_state(vm_state): + """Map the vm state to the server status string""" + return _STATE_DESCRIPTION_MAP[vm_state] + + # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' _DEFAULT_MAPPINGS = {'ami': 'sda1', @@ -1199,7 +1222,7 @@ class CloudController(object): self._format_ramdisk_id(instance, i, 'ramdiskId') i['instanceState'] = { 'code': instance['power_state'], - 'name': instance['vm_state']} #FIXME + 'name': state_description_from_vm_state(instance['vm_state'])} fixed_addr = None floating_addr = None if instance['fixed_ips']: diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..cce9514ec 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -1163,7 +1163,7 @@ class CloudTestCase(test.TestCase): self.compute = self.start_service('compute') def _wait_for_state(self, ctxt, instance_id, predicate): - """Wait for an stopping instance to be a given state""" + """Wait for a stopped instance to be a given state""" id = ec2utils.ec2_id_to_id(instance_id) while True: info = self.cloud.compute_api.get(context=ctxt, 
instance_id=id) @@ -1174,12 +1174,12 @@ class CloudTestCase(test.TestCase): def _wait_for_running(self, instance_id): def is_running(info): - return info['state_description'] == 'running' + return info['vm_state'] == 'running' self._wait_for_state(self.context, instance_id, is_running) def _wait_for_stopped(self, instance_id): def is_stopped(info): - return info['state_description'] == 'stopped' + return info['vm_state'] == 'stopped' self._wait_for_state(self.context, instance_id, is_stopped) def _wait_for_terminate(self, instance_id): @@ -1562,7 +1562,7 @@ class CloudTestCase(test.TestCase): 'id': 0, 'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'state_description': 'stopping', + 'vm_state': 'stopped', 'instance_type': {'name': 'fake_type'}, 'kernel_id': 1, 'ramdisk_id': 2, diff --git a/nova/virt/fake.py b/nova/virt/fake.py index dc0628772..b42f4ca2f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -42,9 +42,9 @@ def get_connection(_): class FakeInstance(object): - def __init__(self, name, state): + def __init__(self, name, power_state): self.name = name - self.state = state + self.power_state = power_state class FakeConnection(driver.ComputeDriver): @@ -120,7 +120,7 @@ class FakeConnection(driver.ComputeDriver): def _map_to_instance_info(self, instance): instance = utils.check_isinstance(instance, FakeInstance) - info = driver.InstanceInfo(instance.name, instance.state) + info = driver.InstanceInfo(instance.name, instance.power_state) return info def list_instances_detail(self): @@ -150,8 +150,8 @@ class FakeConnection(driver.ComputeDriver): """ name = instance.name - state = power_state.RUNNING - fake_instance = FakeInstance(name, state) + pstate = power_state.RUNNING + fake_instance = FakeInstance(name, pstate) self.instances[name] = fake_instance def snapshot(self, context, instance, name): @@ -325,7 +325,7 @@ class FakeConnection(driver.ComputeDriver): if instance_name not in self.instances: raise exception.InstanceNotFound(instance_id=instance_name) i = self.instances[instance_name] - return {'state': i.state, + return {'state': i.power_state, 'max_mem': 0, 'mem': 0, 'num_cpu': 2, -- cgit From 44aea954e5efa7d94d8333ddbf54dab6464018a0 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 10:01:13 -0400 Subject: Renamed task_state to task_states... 
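A rename like this has to move every "from nova.compute import task_state" and every task_state.FOO attribute reference in a single sweep; any module that is missed fails with an ImportError or NameError the first time it is imported, which is the kind of breakage the follow-up test fixes below clean up. A small, hypothetical helper (not part of this series) for sweeping the tree for stale references before committing:

    import os
    import re

    def find_stale_refs(root, old_name='task_state'):
        """Yield (path, lineno, line) for lines still naming the old module.

        The word-boundary pattern keeps 'task_states' itself from matching.
        """
        pattern = re.compile(r'\b%s\b' % re.escape(old_name))
        for dirpath, _dirs, filenames in os.walk(root):
            for filename in filenames:
                if not filename.endswith('.py'):
                    continue
                path = os.path.join(dirpath, filename)
                with open(path) as source:
                    for lineno, line in enumerate(source, 1):
                        if pattern.search(line):
                            yield path, lineno, line.rstrip()

The pattern also flags legitimate local variables named task_state (several test helpers use one as a dict key), so treat the output as a review list rather than an error list.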
--- nova/api/openstack/common.py | 6 ++-- nova/compute/manager.py | 30 ++++++++--------- nova/compute/task_state.py | 43 ------------------------- nova/compute/task_states.py | 43 +++++++++++++++++++++++++ nova/tests/api/openstack/test_server_actions.py | 1 - nova/tests/vmwareapi/db_fakes.py | 4 +-- 6 files changed, 63 insertions(+), 64 deletions(-) delete mode 100644 nova/compute/task_state.py create mode 100644 nova/compute/task_states.py diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index eae0fd916..778c1e514 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -28,7 +28,7 @@ from nova import log as logging from nova import quota from nova.api.openstack import wsgi from nova.compute import vm_states -from nova.compute import task_state +from nova.compute import task_states LOG = logging.getLogger('nova.api.openstack.common') @@ -56,9 +56,9 @@ _STATE_MAP = { } -def status_from_state(_vm_state, _task_state=None): +def status_from_state(_vm_state, task_state=None): """Given vm_state and task_state, return a status string.""" - if _vm_state == vm_states.ACTIVE and _task_state == task_state.PASSWORD: + if _vm_state == vm_states.ACTIVE and task_state == task_states.PASSWORD: return "PASSWORD" return _STATE_MAP.get(_vm_state, "UNKNOWN_STATE") diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 4be5bdd69..5a4f62b76 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -56,7 +56,7 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state -from nova.compute import task_state +from nova.compute import task_states from nova.compute import vm_states from nova.notifier import api as notifier from nova.compute.utils import terminate_volumes @@ -373,7 +373,7 @@ class ComputeManager(manager.SchedulerDependentManager): updates['host'] = self.host updates['launched_on'] = self.host updates['vm_state'] = vm_states.BUILD - updates['task_state'] = task_state.NETWORKING + updates['task_state'] = task_states.NETWORKING instance = self.db.instance_update(context, instance_id, updates) instance['injected_files'] = kwargs.get('injected_files', []) instance['admin_pass'] = kwargs.get('admin_password', None) @@ -398,7 +398,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.BUILD, - task_state=task_state.BLOCK_DEVICE_MAPPING) + task_state=task_states.BLOCK_DEVICE_MAPPING) (swap, ephemerals, block_device_mapping) = self._setup_block_device_mapping( @@ -411,7 +411,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - task_state=task_state.SPAWN) + task_state=task_states.SPAWN) # TODO(vish) check to make sure the availability zone matches try: @@ -524,7 +524,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state=current_power_state, vm_state=vm_states.REBUILD, - task_state=task_state.REBUILDING) + task_state=task_states.REBUILDING) network_info = self._get_instance_nw_info(context, instance_ref) self.driver.destroy(instance_ref, network_info) @@ -532,7 +532,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.REBUILD, - task_state=task_state.BLOCK_DEVICE_MAPPING) + task_state=task_states.BLOCK_DEVICE_MAPPING) bd_mapping = self._setup_block_device_mapping(context, instance_id) @@ -545,7 +545,7 @@ class ComputeManager(manager.SchedulerDependentManager): 
self._instance_update(context, instance_id, vm_state=vm_states.REBUILD, - task_state=task_state.SPAWN) + task_state=task_states.SPAWN) self.driver.spawn(context, instance_ref, network_info, bd_mapping) @@ -578,7 +578,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id, power_state=current_power_state, vm_state=vm_states.REBOOT, - task_state=task_state.REBOOTING) + task_state=task_states.REBOOTING) if instance_ref['power_state'] != power_state.RUNNING: state = instance_ref['power_state'] @@ -788,7 +788,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.RESCUE, - task_state=task_state.RESCUING) + task_state=task_states.RESCUING) instance_ref = self.db.instance_get(context, instance_id) network_info = self._get_instance_nw_info(context, instance_ref) @@ -800,7 +800,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.RESCUE, - task_state=task_state.RESCUED, + task_state=task_states.RESCUED, power_state=current_power_state) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -813,7 +813,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.ACTIVE, - task_state=task_state.UNRESCUING) + task_state=task_states.UNRESCUING) instance_ref = self.db.instance_get(context, instance_id) network_info = self._get_instance_nw_info(context, instance_ref) @@ -1049,7 +1049,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.PAUSE, - task_state=task_state.PAUSING) + task_state=task_states.PAUSING) instance_ref = self.db.instance_get(context, instance_id) self.driver.pause(instance_ref, lambda result: None) @@ -1071,7 +1071,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.ACTIVE, - task_state=task_state.UNPAUSING) + task_state=task_states.UNPAUSING) instance_ref = self.db.instance_get(context, instance_id) self.driver.unpause(instance_ref, lambda result: None) @@ -1112,7 +1112,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.SUSPEND, - task_state=task_state.SUSPENDING) + task_state=task_states.SUSPENDING) instance_ref = self.db.instance_get(context, instance_id) self.driver.suspend(instance_ref, lambda result: None) @@ -1134,7 +1134,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.ACTIVE, - task_state=task_state.RESUMING) + task_state=task_states.RESUMING) instance_ref = self.db.instance_get(context, instance_id) self.driver.resume(instance_ref, lambda result: None) diff --git a/nova/compute/task_state.py b/nova/compute/task_state.py deleted file mode 100644 index 55466c783..000000000 --- a/nova/compute/task_state.py +++ /dev/null @@ -1,43 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Possible task states for instances""" - -BLOCK_DEVICE_MAPPING='block_device_mapping' -NETWORKING='networking' -SPAWN='spawn' - -SNAPSHOT='snapshot' -BACKUP='backup' -PASSWORD='password' - -RESIZE_PREP='resize_prep' -RESIZE_MIGRATING='resize_migrating' -RESIZE_MIGRATED='resize_migrated' -RESIZE_FINISH='resize_finish' - -REBUILDING='rebuilding' - -REBOOTING='rebooting' -PAUSING='pausing' -UNPAUSING='unpausing' -SUSPENDING='suspending' -RESUMING='resuming' - -RESCUING='rescuing' -RESCUED='rescued' -UNRESCUING='unrescuing' diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py new file mode 100644 index 000000000..55466c783 --- /dev/null +++ b/nova/compute/task_states.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Possible task states for instances""" + +BLOCK_DEVICE_MAPPING='block_device_mapping' +NETWORKING='networking' +SPAWN='spawn' + +SNAPSHOT='snapshot' +BACKUP='backup' +PASSWORD='password' + +RESIZE_PREP='resize_prep' +RESIZE_MIGRATING='resize_migrating' +RESIZE_MIGRATED='resize_migrated' +RESIZE_FINISH='resize_finish' + +REBUILDING='rebuilding' + +REBOOTING='rebooting' +PAUSING='pausing' +UNPAUSING='unpausing' +SUSPENDING='suspending' +RESUMING='resuming' + +RESCUING='rescuing' +RESCUED='rescued' +UNRESCUING='unrescuing' diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 6f8be0f47..011f83773 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -13,7 +13,6 @@ from nova import flags from nova.api.openstack import create_instance_helper from nova.compute import instance_types from nova.compute import vm_states -from nova.compute import task_state import nova.db.api from nova import test from nova.tests.api.openstack import common diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index dd38420ce..b046071c7 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -23,7 +23,7 @@ import time from nova import db from nova import utils -from nova.compute import task_state +from nova.compute import task_states from nova.compute import vm_states @@ -67,7 +67,7 @@ def stub_out_db_instance_api(stubs): 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILD, - 'task_state': task_state.SCHEDULE, + 'task_state': task_states.SCHEDULE, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), -- cgit From 0ea797cd8e709d910c428234417fb179bdfd1525 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 10:50:05 -0400 Subject: Update virt/fake to correct power state issue. --- nova/virt/fake.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index b42f4ca2f..c12ee3ab8 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -42,9 +42,9 @@ def get_connection(_): class FakeInstance(object): - def __init__(self, name, power_state): + def __init__(self, name, state): self.name = name - self.power_state = power_state + self.state = state class FakeConnection(driver.ComputeDriver): @@ -150,8 +150,8 @@ class FakeConnection(driver.ComputeDriver): """ name = instance.name - pstate = power_state.RUNNING - fake_instance = FakeInstance(name, pstate) + state = power_state.RUNNING + fake_instance = FakeInstance(name, state) self.instances[name] = fake_instance def snapshot(self, context, instance, name): @@ -325,7 +325,7 @@ class FakeConnection(driver.ComputeDriver): if instance_name not in self.instances: raise exception.InstanceNotFound(instance_id=instance_name) i = self.instances[instance_name] - return {'state': i.power_state, + return {'state': i.state, 'max_mem': 0, 'mem': 0, 'num_cpu': 2, -- cgit From a450c0f3bcc93fe3ec74939e49b109cb02624913 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 11:09:24 -0400 Subject: Update migration number. 
--- .../versions/039_update_instance_states.py | 57 ---------------------- .../versions/040_update_instance_states.py | 57 ++++++++++++++++++++++ 2 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py deleted file mode 100644 index 07efbf90f..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/039_update_instance_states.py +++ /dev/null @@ -1,57 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, String - -meta = MetaData() - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state - c_state.alter(name='power_state') - - c_vm_state = instances.c.state_description - c_vm_state.alter(name='vm_state') - - instances.create_column(c_task_state) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.power_state - c_state.alter(name='state') - - c_vm_state = instances.c.vm_state - c_vm_state.alter(name='state_description') - - instances.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py new file mode 100644 index 000000000..07efbf90f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From c2736787be23d0893e2d4aebcc2cad6fdc5c2bd1 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 22 Aug 2011 11:57:42 -0400 Subject: Fix scheduler and integrated tests. --- nova/tests/integrated/test_servers.py | 11 +++++++++-- nova/tests/scheduler/test_scheduler.py | 6 +++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index deeb3d008..0e3a6eefb 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -35,7 +35,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): server = self.api.get_server(server['id']) print server retries = retries + 1 - if retries > 8: + if retries > 5: break return server @@ -48,8 +48,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_delete_server(self): """Creates and deletes a server.""" self.flags(stub_network=True) - # Create server + # Create server # Build the server data gradually, checking errors along the way server = {} good_server = self._build_minimal_create_server_request() @@ -184,6 +184,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server(self): """Rebuild a server.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -216,6 +217,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server_with_metadata(self): """Rebuild a server with metadata.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -225,6 +227,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = self._wait_for_creation(created_server) + # rebuild the server with metadata post = {} post['rebuild'] = { @@ -252,6 +256,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): def test_create_and_rebuild_server_with_metadata_removal(self): """Rebuild a server with metadata.""" + self.flags(stub_network=True) # create a server with initially has no metadata server = self._build_minimal_create_server_request() @@ -268,6 +273,8 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertTrue(created_server['id']) created_server_id = created_server['id'] + created_server = 
self._wait_for_creation(created_server) + + # rebuild the server with metadata post = {} post['rebuild'] = { diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 629019eaf..a1281ae73 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -95,7 +95,7 @@ class SchedulerTestCase(test.TestCase): inst['vcpus'] = kwargs.get('vcpus', 1) inst['memory_mb'] = kwargs.get('memory_mb', 10) inst['local_gb'] = kwargs.get('local_gb', 20) - inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE) inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) inst['task_state'] = kwargs.get('task_state', None) return db.instance_create(ctxt, inst) @@ -275,7 +275,7 @@ class SimpleDriverTestCase(test.TestCase): inst['memory_mb'] = kwargs.get('memory_mb', 20) inst['local_gb'] = kwargs.get('local_gb', 30) inst['launched_on'] = kwargs.get('launched_on', 'dummy') - inst['vm_state'] = kwargs.get('vm_state', vm_state.ACTIVE) + inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE) inst['task_state'] = kwargs.get('task_state', None) inst['power_state'] = kwargs.get('power_state', power_state.RUNNING) return db.instance_create(self.context, inst)['id'] @@ -669,7 +669,7 @@ class SimpleDriverTestCase(test.TestCase): block_migration=False) i_ref = db.instance_get(self.context, instance_id) - self.assertTrue(i_ref['vm_state'] == vm_state.MIGRATE) + self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATE) db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) -- cgit From d60f813201df345507ce0aca7bed0f8b719aabfe Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 11:59:08 -0400 Subject: Fixes/updates to make test_cloud pass.
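The key piece of this fix-up commit is the one-line addition of a None entry to _STATE_DESCRIPTION_MAP in nova/api/ec2/cloud.py: an instance row that exists in the database but has not yet been scheduled carries vm_state None (the create path only starts stamping vm_state=BUILD in a later commit in this series), and the map's direct [] lookup would otherwise raise KeyError while describing instances. A hedged sketch of the alternative approach, defaulting instead of enumerating None:

    # Abridged map for illustration; the real one lives in nova/api/ec2/cloud.py.
    _STATE_DESCRIPTION_MAP = {None: 'pending', 'active': 'running'}

    def state_description_from_vm_state(vm_state):
        # A .get() default also tolerates states the map forgot, at the
        # cost of hiding them; the committed fix keeps the explicit None
        # key so genuinely unexpected states still fail loudly.
        return _STATE_DESCRIPTION_MAP.get(vm_state, 'pending')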
--- nova/api/ec2/cloud.py | 1 + nova/compute/manager.py | 2 +- nova/tests/test_cloud.py | 7 ++++--- nova/virt/fake.py | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b7c7d2e12..8bddf3032 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -82,6 +82,7 @@ def _gen_key(context, user_id, key_name): # EC2 API: Valid Values: # pending | running | shutting-down | terminated | stopping | stopped _STATE_DESCRIPTION_MAP = { + None: 'pending', vm_states.ACTIVE: 'running', vm_states.BUILD: 'pending', vm_states.REBUILD: 'pending', diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5a4f62b76..75928f7ef 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1706,7 +1706,7 @@ class ComputeManager(manager.SchedulerDependentManager): if vm_instance is None: vm_power_state = power_state.NOSTATE else: - vm_power_state = vm_instance["power_state"] + vm_power_state = vm_instance.state if vm_power_state == db_power_state: continue diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index cce9514ec..4d148f39e 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -38,6 +38,7 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils +from nova.compute import vm_states from nova.image import fake @@ -1174,12 +1175,12 @@ class CloudTestCase(test.TestCase): def _wait_for_running(self, instance_id): def is_running(info): - return info['vm_state'] == 'running' + return info['vm_state'] == vm_states.ACTIVE self._wait_for_state(self.context, instance_id, is_running) def _wait_for_stopped(self, instance_id): def is_stopped(info): - return info['vm_state'] == 'stopped' + return info['vm_state'] == vm_states.STOP self._wait_for_state(self.context, instance_id, is_stopped) def _wait_for_terminate(self, instance_id): @@ -1562,7 +1563,7 @@ class CloudTestCase(test.TestCase): 'id': 0, 'root_device_name': '/dev/sdh', 'security_groups': [{'name': 'fake0'}, {'name': 'fake1'}], - 'vm_state': 'stopped', + 'vm_state': vm_states.STOP, 'instance_type': {'name': 'fake_type'}, 'kernel_id': 1, 'ramdisk_id': 2, diff --git a/nova/virt/fake.py b/nova/virt/fake.py index c12ee3ab8..dc0628772 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -120,7 +120,7 @@ class FakeConnection(driver.ComputeDriver): def _map_to_instance_info(self, instance): instance = utils.check_isinstance(instance, FakeInstance) - info = driver.InstanceInfo(instance.name, instance.power_state) + info = driver.InstanceInfo(instance.name, instance.state) return info def list_instances_detail(self): -- cgit From 393c9375626812ecb904d9048c833b0d110e9aa8 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Mon, 22 Aug 2011 13:04:05 -0400 Subject: Use 'vm_state' instead of 'state' in instance filters query. --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index a5ed2363f..3e690e094 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1239,7 +1239,7 @@ def instance_get_all_by_filters(context, filters): # Filters for exact matches that we can do along with the SQL query... 
# For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', - 'state', 'instance_type_id', 'deleted'] + 'vm_state', 'instance_type_id', 'deleted'] query_filters = [key for key in filters.iterkeys() if key in exact_match_filter_names] -- cgit From ea3684d2a2e60f19bdea6b3117be613103a605dc Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 22 Aug 2011 13:16:48 -0400 Subject: Fixes for a number of tests. --- nova/compute/api.py | 3 +++ nova/compute/task_states.py | 1 + nova/tests/vmwareapi/db_fakes.py | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 3664be5ed..0f993015b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -36,6 +36,7 @@ from nova import utils from nova import volume from nova.compute import instance_types from nova.compute import power_state +from nova.compute import task_states from nova.compute import vm_states from nova.compute.utils import terminate_volumes from nova.scheduler import api as scheduler_api @@ -397,6 +398,8 @@ class API(base.Base): updates['display_name'] = "Server %s" % instance_id instance['display_name'] = updates['display_name'] updates['hostname'] = self.hostname_factory(instance) + updates['vm_state'] = vm_states.BUILD + updates['task_state'] = task_states.SCHEDULING instance = self.update(context, instance_id, **updates) return instance diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py index 55466c783..885a30ebe 100644 --- a/nova/compute/task_states.py +++ b/nova/compute/task_states.py @@ -17,6 +17,7 @@ """Possible task states for instances""" +SCHEDULING='scheduling' BLOCK_DEVICE_MAPPING='block_device_mapping' NETWORKING='networking' SPAWN='spawn' diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py index b046071c7..b56956f96 100644 --- a/nova/tests/vmwareapi/db_fakes.py +++ b/nova/tests/vmwareapi/db_fakes.py @@ -67,7 +67,7 @@ def stub_out_db_instance_api(stubs): 'kernel_id': values['kernel_id'], 'ramdisk_id': values['ramdisk_id'], 'vm_state': vm_states.BUILD, - 'task_state': task_states.SCHEDULE, + 'task_state': task_states.SCHEDULING, 'user_id': values['user_id'], 'project_id': values['project_id'], 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), -- cgit From 7726b3d763a136347f2324e630f0a3cdc60a045b Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 22 Aug 2011 14:08:03 -0700 Subject: Simple usage extension for nova. Uses db to calculate tenant_usage for specified time periods. Methods: index: return a list of tenant_usages, with the option of including detailed server_usage; show: returns a specific tenant_usage object. tenant_usage object: tenant_usage.total_memory_mb_usage: sum of memory_mb * hours for all instances in tenant for this period; tenant_usage.total_local_gb_usage: sum of local_gb * hours for all instances in tenant for this period; tenant_usage.total_vcpus_usage: sum of vcpus * hours for all instances in tenant for this period; tenant_usage.total_hours: sum of all instance hours for this period; tenant_usage.server_usages: a detailed list of server_usages, which describe the usage of a specific server. For larger instance db tables, indexes on instance.launched_at and instance.terminated_at should significantly help performance.
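The heart of the extension is clamping each instance's [launched_at, terminated_at) lifetime to the requested reporting window and converting the overlap to hours. A condensed, hypothetical restatement of the _hours_for logic from the diff that follows, taking plain datetime objects (note the microseconds-to-seconds factor of 1,000,000):

    from datetime import datetime

    def billable_hours(launched_at, terminated_at, period_start, period_stop):
        """Return hours of overlap between an instance's lifetime and the
        reporting window; terminated_at is None while still running."""
        if launched_at is None or launched_at > period_stop:
            return 0.0          # never ran inside the window
        if terminated_at is not None and terminated_at < period_start:
            return 0.0          # gone before the window opened
        start = max(launched_at, period_start)
        stop = min(terminated_at, period_stop) if terminated_at else period_stop
        delta = stop - start
        seconds = delta.days * 86400 + delta.seconds + delta.microseconds / 1e6
        return seconds / 3600.0

For example, an instance launched 30 minutes into a 24-hour window and still running at its close contributes 23.5 hours, so a 1024 MB flavor adds 1024 * 23.5 to that tenant's total_memory_mb_usage.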
--- nova/api/openstack/contrib/simple_tenant_usage.py | 268 +++++++++++++++++++++ .../openstack/contrib/test_simple_tenant_usage.py | 189 +++++++++++++++ 2 files changed, 457 insertions(+) create mode 100644 nova/api/openstack/contrib/simple_tenant_usage.py create mode 100644 nova/tests/api/openstack/contrib/test_simple_tenant_usage.py diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py new file mode 100644 index 000000000..d578b2b67 --- /dev/null +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -0,0 +1,268 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import urlparse +import webob + +from datetime import datetime +from nova import db +from nova import exception +from nova import flags +from nova.compute import instance_types +from nova.api.openstack import extensions +from nova.api.openstack import views +from nova.db.sqlalchemy.session import get_session +from webob import exc + + +FLAGS = flags.FLAGS + +INSTANCE_FIELDS = ['id', + 'image_ref', + 'project_id', + 'user_id', + 'display_name', + 'state_description', + 'instance_type_id', + 'launched_at', + 'terminated_at'] + + +class SimpleTenantUsageController(object): + + def _get_instances_for_time_period(self, period_start, period_stop, + tenant_id): + tenant_clause = '' + if tenant_id: + tenant_clause = " and project_id='%s'" % tenant_id + + conn = get_session().connection() + rows = conn.execute("select %s from instances where \ + (terminated_at is NULL or terminated_at > '%s') \ + and (launched_at < '%s') %s" %\ + (','.join(INSTANCE_FIELDS), + period_start.isoformat(' '),\ + period_stop.isoformat(' '), + tenant_clause)).fetchall() + + return rows + + def _hours_for(self, instance, period_start, period_stop): + launched_at = instance['launched_at'] + terminated_at = instance['terminated_at'] + if terminated_at is not None: + if not isinstance(terminated_at, datetime): + terminated_at = datetime.strptime(terminated_at, + "%Y-%m-%d %H:%M:%S.%f") + + if launched_at is not None: + if not isinstance(launched_at, datetime): + launched_at = datetime.strptime(launched_at, + "%Y-%m-%d %H:%M:%S.%f") + + if terminated_at and terminated_at < period_start: + return 0 + # nothing if it started after the usage report ended + if launched_at and launched_at > period_stop: + return 0 + if launched_at: + # charge only from the later of launched_at and period_start + start = max(launched_at, period_start) + if terminated_at: + # if instance stopped before period_stop, don't charge after + stop = min(period_stop, terminated_at) + else: + # instance is still running, so charge them up to current time + stop = period_stop + dt = stop - start + seconds = dt.days * 3600 * 24 + dt.seconds\ + + dt.microseconds / 1000000.0 + + return seconds / 3600.0 + else: + # instance hasn't launched, so no charge + return 0 + + def _usage_for_period(self, context, period_start, + period_stop,
tenant_id=None, detailed=True): + + rows = self._get_instances_for_time_period(period_start, + period_stop, + tenant_id) + rval = {} + flavors = {} + + for row in rows: + info = {} + for i in range(len(INSTANCE_FIELDS)): + info[INSTANCE_FIELDS[i]] = row[i] + info['hours'] = self._hours_for(info, period_start, period_stop) + flavor_type = info['instance_type_id'] + + if not flavors.get(flavor_type): + try: + flavors[flavor_type] = db.instance_type_get(context, + info['instance_type_id']) + except exception.InstanceTypeNotFound: + # can't bill if there is no instance type + continue + + flavor = flavors[flavor_type] + + info['name'] = info['display_name'] + del(info['display_name']) + + info['memory_mb'] = flavor['memory_mb'] + info['local_gb'] = flavor['local_gb'] + info['vcpus'] = flavor['vcpus'] + + info['tenant_id'] = info['project_id'] + del(info['project_id']) + + info['flavor'] = flavor['name'] + del(info['instance_type_id']) + + info['started_at'] = info['launched_at'] + del(info['launched_at']) + + info['ended_at'] = info['terminated_at'] + del(info['terminated_at']) + + if info['ended_at']: + info['state'] = 'terminated' + else: + info['state'] = info['state_description'] + + del(info['state_description']) + + now = datetime.utcnow() + + if info['state'] == 'terminated': + delta = self._parse_datetime(info['ended_at'])\ + - self._parse_datetime(info['started_at']) + else: + delta = now - self._parse_datetime(info['started_at']) + + info['uptime'] = delta.days * 24 * 60 * 60 + delta.seconds + + if not info['tenant_id'] in rval: + summary = {} + summary['tenant_id'] = info['tenant_id'] + if detailed: + summary['server_usages'] = [] + summary['total_local_gb_usage'] = 0 + summary['total_vcpus_usage'] = 0 + summary['total_memory_mb_usage'] = 0 + summary['total_hours'] = 0 + summary['start'] = period_start + summary['stop'] = period_stop + rval[info['tenant_id']] = summary + + summary = rval[info['tenant_id']] + summary['total_local_gb_usage'] += info['local_gb'] * info['hours'] + summary['total_vcpus_usage'] += info['vcpus'] * info['hours'] + summary['total_memory_mb_usage'] += info['memory_mb']\ + * info['hours'] + + summary['total_hours'] += info['hours'] + if detailed: + summary['server_usages'].append(info) + + return rval.values() + + def _parse_datetime(self, dtstr): + if isinstance(dtstr, datetime): + return dtstr + try: + return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S") + except: + try: + return datetime.strptime(dtstr, "%Y-%m-%dT%H:%M:%S.%f") + except: + return datetime.strptime(dtstr, "%Y-%m-%d %H:%M:%S.%f") + + def _get_datetime_range(self, req): + qs = req.environ.get('QUERY_STRING', '') + env = urlparse.parse_qs(qs) + period_start = self._parse_datetime(env.get('start', + [datetime.utcnow().isoformat()])[0]) + period_stop = self._parse_datetime(env.get('end', + [datetime.utcnow().isoformat()])[0]) + + detailed = bool(env.get('detailed', False)) + return (period_start, period_stop, detailed) + + def index(self, req): + """Retrieve tenant_usage for all tenants""" + (period_start, period_stop, detailed) = self._get_datetime_range(req) + context = req.environ['nova.context'] + + if not context.is_admin and FLAGS.allow_admin_api: + return webob.Response(status_int=403) + + usages = self._usage_for_period(context, + period_start, + period_stop, + detailed=detailed) + return {'tenant_usages': usages} + + def show(self, req, id): + """Retrieve tenant_usage for a specified tenant""" + (period_start, period_stop, ignore) = self._get_datetime_range(req) + context =
req.environ['nova.context'] + + if not context.is_admin and FLAGS.allow_admin_api: + if id != context.project_id: + return webob.Response(status_int=403) + + usage = self._usage_for_period(context, + period_start, + period_stop, + id, + detailed=True) + if len(usage): + usage = usage[0] + else: + usage = {} + return {'tenant_usage': usage} + + +class Simple_tenant_usage(extensions.ExtensionDescriptor): + + def get_name(self): + return "Simple_tenant_usage" + + def get_alias(self): + return "os-simple-tenant-usage" + + def get_description(self): + return "Simple tenant usage extension" + + def get_namespace(self): + return "http://docs.openstack.org/ext/os-simple-tenant-usage/api/v1.1" + + def get_updated(self): + return "2011-08-19T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-simple-tenant-usage', + SimpleTenantUsageController()) + resources.append(res) + + return resources diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py new file mode 100644 index 000000000..d20e36aaf --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -0,0 +1,189 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime +import json +import webob + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova.compute import instance_types +from nova.db.sqlalchemy import models +from nova.db.sqlalchemy import session +from nova.tests.api.openstack import fakes +from webob import exc + +from nova.api.openstack.contrib import simple_tenant_usage + + +FLAGS = flags.FLAGS + +SERVERS = 5 +TENANTS = 2 +HOURS = 24 +LOCAL_GB = 10 +MEMORY_MB = 1024 +VCPUS = 2 +STOP = datetime.datetime.utcnow() +START = STOP - datetime.timedelta(hours=HOURS) + + +def fake_get_session(): + class FakeFetcher(object): + def fetchall(fetcher_self): + # return 10 rows, 2 tenants, 5 servers each, each run for 1 day + return [get_fake_db_row(START, + STOP, + x, + "faketenant_%s" % (x / SERVERS)) + for x in xrange(TENANTS * SERVERS)] + + class FakeConn(object): + def execute(self, query): + return FakeFetcher() + + class FakeSession(object): + def connection(self): + return FakeConn() + + return FakeSession() + + +def fake_instance_type_get(context, instance_type_id): + return {'id': 1, + 'vcpus': VCPUS, + 'local_gb': LOCAL_GB, + 'memory_mb': MEMORY_MB, + 'name': + 'fakeflavor'} + + +def get_fake_db_row(start, end, instance_id, tenant_id): + return [instance_id, + '1', + tenant_id, + 'fakeuser', + 'name', + 'state', + 1, + start, + None] + + +class SimpleTenantUsageTest(test.TestCase): + def setUp(self): + super(SimpleTenantUsageTest, self).setUp() + self.stubs.Set(session, "get_session", + fake_get_session) + self.stubs.Set(db, "instance_type_get", + fake_instance_type_get) + self.admin_context = context.RequestContext('fakeadmin_0', + 'faketenant_0', + is_admin=True) + self.user_context = context.RequestContext('fakeadmin_0', + 'faketenant_0', + is_admin=False) + self.alt_user_context = context.RequestContext('fakeadmin_0', + 'faketenant_1', + is_admin=False) + FLAGS.allow_admin_api = True + + def test_verify_db_fields_exist_in_instance_model(self): + for field in simple_tenant_usage.INSTANCE_FIELDS: + self.assertTrue(field in models.Instance.__table__.columns) + + def test_verify_index(self): + req = webob.Request.blank( + '/v1.1/os-simple-tenant-usage?start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + usages = res_dict['tenant_usages'] + for i in xrange(TENANTS): + self.assertEqual(int(usages[i]['total_hours']), + SERVERS * HOURS) + self.assertEqual(int(usages[i]['total_local_gb_usage']), + SERVERS * LOCAL_GB * HOURS) + self.assertEqual(int(usages[i]['total_memory_mb_usage']), + SERVERS * MEMORY_MB * HOURS) + self.assertEqual(int(usages[i]['total_vcpus_usage']), + SERVERS * VCPUS * HOURS) + self.assertFalse(usages[i].get('server_usages')) + + def test_verify_detailed_index(self): + req = webob.Request.blank( + '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + usages = res_dict['tenant_usages'] + for i in xrange(TENANTS): + servers = usages[i]['server_usages'] + for j in xrange(SERVERS): + self.assertEqual(int(servers[j]['hours']), HOURS) + + def 
test_verify_index_fails_for_nonadmin(self): + req = webob.Request.blank( + '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 403) + + def test_verify_show(self): + req = webob.Request.blank( + '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + + usage = res_dict['tenant_usage'] + servers = usage['server_usages'] + self.assertEqual(len(usage['server_usages']), SERVERS) + for j in xrange(SERVERS): + self.assertEqual(int(servers[j]['hours']), HOURS) + + def test_verify_show_cant_view_other_tenant(self): + req = webob.Request.blank( + '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' % + (START.isoformat(), STOP.isoformat())) + req.method = "GET" + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.alt_user_context)) + self.assertEqual(res.status_int, 403) -- cgit From 4b0944731a25d3cfcd30358619376dedf2251701 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 22 Aug 2011 14:31:26 -0700 Subject: some readability fixes per ja feedback --- nova/api/openstack/contrib/simple_tenant_usage.py | 29 ++++++++++++----------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index d578b2b67..80d819365 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -97,8 +97,8 @@ class SimpleTenantUsageController(object): # instance hasn't launched, so no charge return 0 - def _usage_for_period(self, context, period_start, - period_stop, tenant_id=None, detailed=True): + def _tenant_usages_for_period(self, context, period_start, + period_stop, tenant_id=None, detailed=True): rows = self._get_instances_for_time_period(period_start, period_stop, tenant_id) @@ -208,32 +208,33 @@ class SimpleTenantUsageController(object): def index(self, req): """Retrieve tenant_usage for all tenants""" - (period_start, period_stop, detailed) = self._get_datetime_range(req) context = req.environ['nova.context'] if not context.is_admin and FLAGS.allow_admin_api: return webob.Response(status_int=403) - usages = self._usage_for_period(context, - period_start, - period_stop, - detailed=detailed) + (period_start, period_stop, detailed) = self._get_datetime_range(req) + usages = self._tenant_usages_for_period(context, + period_start, + period_stop, + detailed=detailed) return {'tenant_usages': usages} def show(self, req, id): """Retrieve tenant_usage for a specified tenant""" - (period_start, period_stop, ignore) = self._get_datetime_range(req) + tenant_id = id context = req.environ['nova.context'] if not context.is_admin and FLAGS.allow_admin_api: - if id != context.project_id: + if tenant_id != context.project_id: return webob.Response(status_int=403) - usage = self._usage_for_period(context, - period_start, - period_stop, - id, - detailed=True) + (period_start, period_stop, ignore) = self._get_datetime_range(req) + usage = self._tenant_usages_for_period(context, + period_start, + period_stop, + tenant_id=tenant_id, +
detailed=True) if len(usage): usage = usage[0] else: -- cgit From de0a17310e7228aa96263243851a89fb016f9730 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Mon, 22 Aug 2011 15:21:31 -0700 Subject: remove extra spaces --- nova/api/openstack/contrib/simple_tenant_usage.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 80d819365..5f4218237 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -43,7 +43,6 @@ INSTANCE_FIELDS = ['id', class SimpleTenantUsageController(object): - def _get_instances_for_time_period(self, period_start, period_stop, tenant_id): tenant_clause = '' @@ -243,7 +242,6 @@ class SimpleTenantUsageController(object): class Simple_tenant_usage(extensions.ExtensionDescriptor): - def get_name(self): return "Simple_tenant_usage" -- cgit From 2fbaac5e07b558d7829253915523f073b07e24d4 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 23 Aug 2011 10:57:47 -0400 Subject: PEP8 fixes --- nova/api/ec2/cloud.py | 18 ++++++++----- nova/compute/task_states.py | 63 ++++++++++++++++++++++----------------------- nova/compute/vm_states.py | 24 ++++++++--------- nova/tests/test_compute.py | 15 ++++++----- 4 files changed, 63 insertions(+), 57 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index a7a343938..c5a360426 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -93,7 +93,7 @@ _STATE_DESCRIPTION_MAP = { vm_states.VERIFY_RESIZE: 'verify_resize', vm_states.PAUSED: 'pause', vm_states.SUSPENDED: 'suspend', - vm_states.RESCUED: 'rescue' + vm_states.RESCUED: 'rescue', } @@ -104,10 +104,12 @@ def state_description_from_vm_state(vm_state): # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' -_DEFAULT_MAPPINGS = {'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': _DEFAULT_ROOT_DEVICE_NAME, - 'swap': 'sda3'} +_DEFAULT_MAPPINGS = { + 'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': _DEFAULT_ROOT_DEVICE_NAME, + 'swap': 'sda3', +} def _parse_block_device_mapping(bdm): @@ -1064,8 +1066,10 @@ class CloudController(object): def _format_attr_instance_initiated_shutdown_behavior(instance, result): vm_state = instance['vm_state'] - state_to_value = {vm_states.STOPPED: 'stop', - vm_states.DELETED: 'terminate'} + state_to_value = { + vm_states.STOPPED: 'stop', + vm_states.DELETED: 'terminate', + } value = state_to_value.get(vm_state) if value: result['instanceInitiatedShutdownBehavior'] = value diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py index d186edc3f..5f78495ea 100644 --- a/nova/compute/task_states.py +++ b/nova/compute/task_states.py @@ -17,35 +17,34 @@ """Possible task states for instances""" -SCHEDULING='scheduling' -BLOCK_DEVICE_MAPPING='block_device_mapping' -NETWORKING='networking' -SPAWN='spawn' - -SNAPSHOTTING='snapshotting' -BACKING_UP='backing_up' -PASSWORD='password' - -RESIZE_PREP='resize_prep' -RESIZE_MIGRATING='resize_migrating' -RESIZE_MIGRATED='resize_migrated' -RESIZE_FINISH='resize_finish' -RESIZE_REVERTING='resize_reverting' -RESIZE_CONFIRMING='resize_confirming' - -REBUILDING='rebuilding' - -REBOOTING='rebooting' -HARD_REBOOTING='hard_rebooting' -PAUSING='pausing' -UNPAUSING='unpausing' -SUSPENDING='suspending' -RESUMING='resuming' - -RESCUING='rescuing' -RESCUED='rescued' -UNRESCUING='unrescuing' - -DELETING='deleting' -STOPPING='stopping' -STARTING='starting' +SCHEDULING = 'scheduling' 
+BLOCK_DEVICE_MAPPING = 'block_device_mapping' +NETWORKING = 'networking' +SPAWN = 'spawn' + +SNAPSHOTTING = 'snapshotting' +BACKING_UP = 'backing_up' +PASSWORD = 'password' + +RESIZE_PREP = 'resize_prep' +RESIZE_MIGRATING = 'resize_migrating' +RESIZE_MIGRATED = 'resize_migrated' +RESIZE_FINISH = 'resize_finish' +RESIZE_REVERTING = 'resize_reverting' +RESIZE_CONFIRMING = 'resize_confirming' + +REBUILDING = 'rebuilding' + +REBOOTING = 'rebooting' +HARD_REBOOTING = 'hard_rebooting' +PAUSING = 'pausing' +UNPAUSING = 'unpausing' +SUSPENDING = 'suspending' +RESUMING = 'resuming' + +RESCUING = 'rescuing' +UNRESCUING = 'unrescuing' + +DELETING = 'deleting' +STOPPING = 'stopping' +STARTING = 'starting' diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py index 342fa905e..560e6d688 100644 --- a/nova/compute/vm_states.py +++ b/nova/compute/vm_states.py @@ -17,18 +17,18 @@ """Possible vm states for instances""" -ACTIVE='active' -BUILDING='building' -REBUILDING='rebuilding' +ACTIVE = 'active' +BUILDING = 'building' +REBUILDING = 'rebuilding' -PAUSED='paused' -SUSPENDED='suspended' -RESCUED='rescued' -DELETED='deleted' -STOPPED='stopped' +PAUSED = 'paused' +SUSPENDED = 'suspended' +RESCUED = 'rescued' +DELETED = 'deleted' +STOPPED = 'stopped' -MIGRATING='migrating' -RESIZING='resizing' -VERIFY_RESIZE='verify_resize' +MIGRATING = 'migrating' +RESIZING = 'resizing' +VERIFY_RESIZE = 'verify_resize' -ERROR='error' +ERROR = 'error' diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 2cf694d2c..11e1fd540 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -1308,14 +1308,17 @@ class ComputeTestCase(test.TestCase): """Test searching instances by state""" c = context.get_admin_context() - instance_id1 = self._create_instance({'power_state': power_state.SHUTDOWN}) + instance_id1 = self._create_instance({ + 'power_state': power_state.SHUTDOWN, + }) instance_id2 = self._create_instance({ - 'id': 2, - 'power_state': power_state.RUNNING}) + 'id': 2, + 'power_state': power_state.RUNNING, + }) instance_id3 = self._create_instance({ - 'id': 10, - 'power_state': power_state.RUNNING}) - + 'id': 10, + 'power_state': power_state.RUNNING, + }) instances = self.compute_api.get_all(c, search_opts={'power_state': power_state.SUSPENDED}) self.assertEqual(len(instances), 0) -- cgit From 657e58113d481d5c03cb3395cd714846434675f0 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Tue, 23 Aug 2011 11:01:57 -0400 Subject: Updated migration number. --- .../versions/040_update_instance_states.py | 57 ---------------------- .../versions/042_update_instance_states.py | 57 ++++++++++++++++++++++ 2 files changed, 57 insertions(+), 57 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py deleted file mode 100644 index 07efbf90f..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/040_update_instance_states.py +++ /dev/null @@ -1,57 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import MetaData, Table, Column, String - -meta = MetaData() - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state - c_state.alter(name='power_state') - - c_vm_state = instances.c.state_description - c_vm_state.alter(name='vm_state') - - instances.create_column(c_task_state) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.power_state - c_state.alter(name='state') - - c_vm_state = instances.c.vm_state - c_vm_state.alter(name='state_description') - - instances.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py new file mode 100644 index 000000000..07efbf90f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -0,0 +1,57 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column, String + +meta = MetaData() + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.state + c_state.alter(name='power_state') + + c_vm_state = instances.c.state_description + c_vm_state.alter(name='vm_state') + + instances.create_column(c_task_state) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instances.c.power_state + c_state.alter(name='state') + + c_vm_state = instances.c.vm_state + c_vm_state.alter(name='state_description') + + instances.drop_column('task_state') -- cgit From da02fc6e4191bdbbb2015b78f9c3fe5045bb0460 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 23 Aug 2011 15:14:09 -0700 Subject: Fix not found exceptions to properly use ec2_ips for not found --- nova/api/ec2/__init__.py | 10 ++++++---- nova/exception.py | 1 + 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5430f443d..363dad7cd 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -392,17 +392,19 @@ class Executor(wsgi.Application): except exception.InstanceNotFound as ex: LOG.info(_('InstanceNotFound raised: %s'), unicode(ex), context=context) - return self._error(req, context, type(ex).__name__, ex.message) + ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_id']) + message = ex.message % {'instance_id': ec2_id} + return self._error(req, context, type(ex).__name__, message) except exception.VolumeNotFound as ex: LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_vol_id(ex.volume_id) - message = _('Volume %s not found') % ec2_id + ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id']) + message = ex.message % {'volume_id': ec2_id} return self._error(req, context, type(ex).__name__, message) except exception.SnapshotNotFound as ex: LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_snap_id(ex.snapshot_id) + ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id']) message = _('Snapshot %s not found') % ec2_id return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: diff --git a/nova/exception.py b/nova/exception.py index 66740019b..5b86059d8 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -146,6 +146,7 @@ class NovaException(Exception): message = _("An unknown exception occurred.") def __init__(self, **kwargs): + self.kwargs = kwargs try: self._error_string = self.message % kwargs -- cgit From 8b3ac90bd53ec81e6669c6169969e1e8da3e2d4f Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 10:59:12 -0400 Subject: Commit with test data in migration. 
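Note: this change seeds the instances table with one row per legacy state_description (plus a deliberately bogus value) so the upgrade translation can be observed end to end. As a minimal, illustrative sketch of the mapping being exercised -- the dict literal below is an excerpt and the fallback behavior is an assumption; the migration itself simply leaves unmatched rows alone:

    # Excerpt: legacy state_description -> (vm_state, task_state).
    _upgrade_translations = {
        "running": {"vm_state": "active", "task_state": None},
        "terminating": {"vm_state": "active", "task_state": "deleting"},
    }

    def translate(state_description):
        # Unknown legacy values (e.g. "bad_state_description") pass
        # through unchanged and gain no task_state.
        default = {"vm_state": state_description, "task_state": None}
        return _upgrade_translations.get(state_description, default)

    assert translate("terminating") == {"vm_state": "active",
                                        "task_state": "deleting"}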
--- nova/compute/api.py | 1 - .../versions/042_update_instance_states.py | 165 ++++++++++++++++++--- nova/exception.py | 3 + 3 files changed, 149 insertions(+), 20 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index a373bed90..eeb8f47d9 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -761,7 +761,6 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.ACTIVE, task_state=task_states.DELETING) host = instance['host'] diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index 07efbf90f..e27b84176 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -14,44 +14,171 @@ # License for the specific language governing permissions and limitations # under the License. +import sqlalchemy from sqlalchemy import MetaData, Table, Column, String +from nova import log +from nova.compute import task_states +from nova.compute import vm_states +from nova.db.sqlalchemy import models + + +LOG = log.getLogger("farts") meta = MetaData() c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +_upgrade_translations = { + "stopping": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.STOPPING, + }, + "stopped": { + "vm_state": vm_states.STOPPED, + "task_state": None, + }, + "terminated": { + "vm_state": vm_states.DELETED, + "task_state": None, + }, + "terminating": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.DELETING, + }, + "running": { + "vm_state": vm_states.ACTIVE, + "task_state": None, + }, + "scheduling": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, + "migrating": { + "vm_state": vm_states.MIGRATING, + "task_state": None, + }, + "pending": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, +} + + +_downgrade_translations = { + vm_states.ACTIVE: { + None: "running", + task_states.DELETING: "terminating", + task_states.STOPPING: "stopping", + }, + vm_states.BUILDING: { + None: "pending", + task_states.SCHEDULING: "scheduling", + }, + vm_states.STOPPED: { + None: "stopped", + }, + vm_states.REBUILDING: { + None: "pending", + }, + vm_states.DELETED: { + None: "terminated", + }, + vm_states.MIGRATING: { + None: "migrating", + }, +} + + +def _insert_test_data(instance_table): + running_instance = models.Instance() + running_instance.state_description = "running" + stopped_instance = models.Instance() + stopped_instance.state_description = "stopped" + terminated_instance = models.Instance() + terminated_instance.state_description = "terminated" + migrating_instance = models.Instance() + migrating_instance.state_description = "migrating" + scheduling_instance = models.Instance() + scheduling_instance.state_description = "scheduling" + bad_instance = models.Instance() + bad_instance.state_description = "bad_state_description" + + instance_table.insert(running_instance).execute() + instance_table.insert(stopped_instance).execute() + instance_table.insert(terminated_instance).execute() + instance_table.insert(migrating_instance).execute() + instance_table.insert(scheduling_instance).execute() + 
instance_table.insert(bad_instance).execute() def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; - # bind migrate_engine to your metadata + #migrate_engine.echo = True meta.bind = migrate_engine - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instances.c.state + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + _insert_test_data(instance_table) + for instance in instance_table.select().execute(): + LOG.info(instance) + c_state = instance_table.c.state c_state.alter(name='power_state') - c_vm_state = instances.c.state_description + c_vm_state = instance_table.c.state_description c_vm_state.alter(name='vm_state') - instances.create_column(c_task_state) + instance_table.create_column(c_task_state) + for old_state, values in _upgrade_translations.iteritems(): + new_values = { + "old_state": old_state, + "vm_state": values["vm_state"], + "task_state": values["task_state"], + } -def downgrade(migrate_engine): - meta.bind = migrate_engine + update = sqlalchemy.text("UPDATE instances SET task_state=:task_state " + "WHERE vm_state=:old_state") + migrate_engine.execute(update, **new_values) + + update = sqlalchemy.text("UPDATE instances SET vm_state=:vm_state " + "WHERE vm_state=:old_state") + migrate_engine.execute(update, **new_values) - instances = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) + for instance in instance_table.select().execute(): + LOG.info(instance) - c_state = instances.c.power_state + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + if old_task_state: + update = sqlalchemy.text("UPDATE instances " + "SET vm_state=:new_state_desc " + "WHERE task_state=:old_task_state " + "AND vm_state=:old_vm_state") + migrate_engine.execute(update, locals()) + else: + update = sqlalchemy.text("UPDATE instances " + "SET vm_state=:new_state_desc " + "WHERE vm_state=:old_vm_state") + migrate_engine.execute(update, locals()) + + #c_state = instance_table.c.power_state c_state.alter(name='state') - c_vm_state = instances.c.vm_state + #c_vm_state = instance_table.c.vm_state c_vm_state.alter(name='state_description') - instances.drop_column('task_state') + instance_table.drop_column('task_state') + + for instance in instance_table.select().execute(): + LOG.info(instance) + + raise Exception() diff --git a/nova/exception.py b/nova/exception.py index 44af8177e..66740019b 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -318,6 +318,9 @@ class InvalidEc2Id(Invalid): class NotFound(NovaException): message = _("Resource could not be found.") + def __init__(self, *args, **kwargs): + super(NotFound, self).__init__(**kwargs) + class FlagNotSet(NotFound): message = _("Required flag %(flag)s not set.") -- cgit From df77c6c168d4370ec582ffbccd43e3b9cb551b98 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 11:00:21 -0400 Subject: Commit without test data in migration. 
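Note: with the test seeding and LOG scaffolding stripped out, upgrade() reduces to the column renames plus the per-state row rewrite, and downgrade() returns as its own function. A hedged sketch of the rewrite as this revision still performs it, in the raw-SQL style (identifiers as in the patch):

    import sqlalchemy

    def apply_translation(migrate_engine, old_state, vm_state, task_state):
        # Two parameterized UPDATEs: set the new task_state first, then
        # rewrite the legacy value still sitting in the renamed
        # vm_state column.
        stmt = sqlalchemy.text("UPDATE instances SET task_state=:task_state "
                               "WHERE vm_state=:old_state")
        migrate_engine.execute(stmt, task_state=task_state,
                               old_state=old_state)
        stmt = sqlalchemy.text("UPDATE instances SET vm_state=:vm_state "
                               "WHERE vm_state=:old_state")
        migrate_engine.execute(stmt, vm_state=vm_state,
                               old_state=old_state)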
--- .../versions/042_update_instance_states.py | 43 +++------------------- 1 file changed, 5 insertions(+), 38 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index e27b84176..10704d0da 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -17,15 +17,13 @@ import sqlalchemy from sqlalchemy import MetaData, Table, Column, String -from nova import log from nova.compute import task_states from nova.compute import vm_states -from nova.db.sqlalchemy import models -LOG = log.getLogger("farts") meta = MetaData() + c_task_state = Column('task_state', String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, @@ -94,37 +92,12 @@ _downgrade_translations = { } -def _insert_test_data(instance_table): - running_instance = models.Instance() - running_instance.state_description = "running" - stopped_instance = models.Instance() - stopped_instance.state_description = "stopped" - terminated_instance = models.Instance() - terminated_instance.state_description = "terminated" - migrating_instance = models.Instance() - migrating_instance.state_description = "migrating" - scheduling_instance = models.Instance() - scheduling_instance.state_description = "scheduling" - bad_instance = models.Instance() - bad_instance.state_description = "bad_state_description" - - instance_table.insert(running_instance).execute() - instance_table.insert(stopped_instance).execute() - instance_table.insert(terminated_instance).execute() - instance_table.insert(migrating_instance).execute() - instance_table.insert(scheduling_instance).execute() - instance_table.insert(bad_instance).execute() - - def upgrade(migrate_engine): - #migrate_engine.echo = True meta.bind = migrate_engine instance_table = Table('instances', meta, autoload=True, autoload_with=migrate_engine) - _insert_test_data(instance_table) - for instance in instance_table.select().execute(): - LOG.info(instance) + c_state = instance_table.c.state c_state.alter(name='power_state') @@ -148,9 +121,8 @@ def upgrade(migrate_engine): "WHERE vm_state=:old_state") migrate_engine.execute(update, **new_values) - for instance in instance_table.select().execute(): - LOG.info(instance) +def downgrade(migrate_engine): meta.bind = migrate_engine instance_table = Table('instances', meta, autoload=True, @@ -170,15 +142,10 @@ def upgrade(migrate_engine): "WHERE vm_state=:old_vm_state") migrate_engine.execute(update, locals()) - #c_state = instance_table.c.power_state + c_state = instance_table.c.power_state c_state.alter(name='state') - #c_vm_state = instance_table.c.vm_state + c_vm_state = instance_table.c.vm_state c_vm_state.alter(name='state_description') instance_table.drop_column('task_state') - - for instance in instance_table.select().execute(): - LOG.info(instance) - - raise Exception() -- cgit From 1ee1bda6cd164bd1e3cc400838830a747371ce9e Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 11:27:02 -0400 Subject: Conversion to SQLAlchemy-style. 
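Note: the raw UPDATE strings give way to SQLAlchemy expression-language statements built from the reflected instances table. One subtlety, which the follow-up "Fix for migrations" commit addresses: the keys handed to values() must match the column names on the Table object actually in use, so the table has to be reflected again after the alter()/create_column() calls. The pattern, assuming instance_table and c_vm_state are bound as in the migration:

    # 'values' holds new column values keyed by current column name,
    # e.g. {"vm_state": "active", "task_state": "stopping"}; execute()
    # runs against the engine the metadata is bound to.
    instance_table.update().\
        values(**values).\
        where(c_vm_state == old_state).\
        execute()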
--- .../versions/042_update_instance_states.py | 55 +++++++++------------- 1 file changed, 21 insertions(+), 34 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index 10704d0da..1005ee8a4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -33,35 +33,35 @@ c_task_state = Column('task_state', _upgrade_translations = { "stopping": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.STOPPING, }, "stopped": { - "vm_state": vm_states.STOPPED, + "state_description": vm_states.STOPPED, "task_state": None, }, "terminated": { - "vm_state": vm_states.DELETED, + "state_description": vm_states.DELETED, "task_state": None, }, "terminating": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.DELETING, }, "running": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": None, }, "scheduling": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, "migrating": { - "vm_state": vm_states.MIGRATING, + "state_description": vm_states.MIGRATING, "task_state": None, }, "pending": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, } @@ -107,19 +107,10 @@ def upgrade(migrate_engine): instance_table.create_column(c_task_state) for old_state, values in _upgrade_translations.iteritems(): - new_values = { - "old_state": old_state, - "vm_state": values["vm_state"], - "task_state": values["task_state"], - } - - update = sqlalchemy.text("UPDATE instances SET task_state=:task_state " - "WHERE vm_state=:old_state") - migrate_engine.execute(update, **new_values) - - update = sqlalchemy.text("UPDATE instances SET vm_state=:vm_state " - "WHERE vm_state=:old_state") - migrate_engine.execute(update, **new_values) + instance_table.update().\ + values(**values).\ + where(c_vm_state == old_state).\ + execute() def downgrade(migrate_engine): @@ -128,19 +119,7 @@ def downgrade(migrate_engine): instance_table = Table('instances', meta, autoload=True, autoload_with=migrate_engine) - for old_vm_state, old_task_states in _downgrade_translations.iteritems(): - for old_task_state, new_state_desc in old_task_states.iteritems(): - if old_task_state: - update = sqlalchemy.text("UPDATE instances " - "SET vm_state=:new_state_desc " - "WHERE task_state=:old_task_state " - "AND vm_state=:old_vm_state") - migrate_engine.execute(update, locals()) - else: - update = sqlalchemy.text("UPDATE instances " - "SET vm_state=:new_state_desc " - "WHERE vm_state=:old_vm_state") - migrate_engine.execute(update, locals()) + c_task_state = instance_table.c.task_state c_state = instance_table.c.power_state c_state.alter(name='state') @@ -148,4 +127,12 @@ def downgrade(migrate_engine): c_vm_state = instance_table.c.vm_state c_vm_state.alter(name='state_description') + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + instance_table.update().\ + where(c_task_state == old_task_state).\ + where(c_vm_state == old_vm_state).\ + values(state_description=new_state_desc).\ + execute() + instance_table.drop_column('task_state') -- cgit From 53b0a2ea13e148fc5f461211ca9056b30db6c43d Mon Sep 
17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 11:32:58 -0400 Subject: Fix for migrations. --- .../versions/042_update_instance_states.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py index 1005ee8a4..65bdf601d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py @@ -33,35 +33,35 @@ c_task_state = Column('task_state', _upgrade_translations = { "stopping": { - "state_description": vm_states.ACTIVE, + "vm_state": vm_states.ACTIVE, "task_state": task_states.STOPPING, }, "stopped": { - "state_description": vm_states.STOPPED, + "vm_state": vm_states.STOPPED, "task_state": None, }, "terminated": { - "state_description": vm_states.DELETED, + "vm_state": vm_states.DELETED, "task_state": None, }, "terminating": { - "state_description": vm_states.ACTIVE, + "vm_state": vm_states.ACTIVE, "task_state": task_states.DELETING, }, "running": { - "state_description": vm_states.ACTIVE, + "vm_state": vm_states.ACTIVE, "task_state": None, }, "scheduling": { - "state_description": vm_states.BUILDING, + "vm_state": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, "migrating": { - "state_description": vm_states.MIGRATING, + "vm_state": vm_states.MIGRATING, "task_state": None, }, "pending": { - "state_description": vm_states.BUILDING, + "vm_state": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, } @@ -106,6 +106,9 @@ def upgrade(migrate_engine): instance_table.create_column(c_task_state) + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + for old_state, values in _upgrade_translations.iteritems(): instance_table.update().\ values(**values).\ -- cgit From 007efcab4b668e7a4b1d26ff274693824f6d7445 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 12:26:05 -0400 Subject: Attempt to fix issue when deleting an instance when it's still in BUILD. --- nova/compute/manager.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 0a1dc13be..802d141ef 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -429,10 +429,15 @@ class ComputeManager(manager.SchedulerDependentManager): return current_power_state = self._get_power_state(context, instance) + if current_power_state == power_state.RUNNING: + vm_state = vm_states.ACTIVE + else: + vm_state = vm_states.BUILDING + self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_states.ACTIVE, + vm_state=vm_state, task_state=None, launched_at=utils.utcnow()) -- cgit From ca8b3c7635208ab0776f51661708ecea1bfc222a Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 12:42:42 -0400 Subject: Fixed rebuild naming issue and reverted other fix which didn't fix anything.
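Note: the naming half of this change defaults a rebuilt server's display name to its existing one when the caller does not supply a new name. As a small self-contained sketch of the idiom (the helper name is hypothetical):

    def pick_rebuild_name(requested_name, instance):
        # Fall back to the current display_name when no name is given.
        # `or` also treats an empty string as "not given", which
        # appears to be the intent here.
        return requested_name or instance["display_name"]

    assert pick_rebuild_name(None, {"display_name": "web-1"}) == "web-1"
    assert pick_rebuild_name("web-2", {"display_name": "web-1"}) == "web-2"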
--- nova/compute/api.py | 2 ++ nova/compute/manager.py | 7 +------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index eeb8f47d9..02bae7262 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1045,6 +1045,8 @@ class API(base.Base): metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" instance = db.api.instance_get(context, instance_id) + name = name or instance["display_name"] + invalid_rebuild_states = [ vm_states.BUILDING, vm_states.REBUILDING, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 802d141ef..0a1dc13be 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -429,15 +429,10 @@ class ComputeManager(manager.SchedulerDependentManager): return current_power_state = self._get_power_state(context, instance) - if current_power_state == power_state.RUNNING: - vm_state = vm_states.ACTIVE - else: - vm_state = vm_states.BUILDING - self._instance_update(context, instance_id, power_state=current_power_state, - vm_state=vm_state, + vm_state=vm_states.ACTIVE, task_state=None, launched_at=utils.utcnow()) -- cgit From 64f946a6a0a6e08d7046ab98776928abe24f8d93 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 13:01:20 -0400 Subject: Fix for trying rebuilds when instance is not active. --- nova/api/openstack/servers.py | 10 ++++------ nova/compute/api.py | 11 +++-------- nova/exception.py | 2 +- 3 files changed, 8 insertions(+), 15 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 4ff9264a6..c5fdda1cf 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -604,9 +604,8 @@ class ControllerV10(Controller): try: self.compute_api.rebuild(context, instance_id, image_id) - except exception.BuildInProgress: - msg = _("Instance %s is currently being rebuilt.") % instance_id - LOG.debug(msg) + except exception.RebuildRequiresActiveInstance: + msg = _("Instance %s must be active to rebuild.") % instance_id raise exc.HTTPConflict(explanation=msg) return webob.Response(status_int=202) @@ -742,9 +741,8 @@ class ControllerV11(Controller): try: self.compute_api.rebuild(context, instance_id, image_href, name, metadata, personalities) - except exception.BuildInProgress: - msg = _("Instance %s is currently being rebuilt.") % instance_id - LOG.debug(msg) + except exception.RebuildRequiresActiveInstance: + msg = _("Instance %s must be active to rebuild.") % instance_id raise exc.HTTPConflict(explanation=msg) return webob.Response(status_int=202) diff --git a/nova/compute/api.py b/nova/compute/api.py index 02bae7262..7ed41fbbc 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1047,14 +1047,9 @@ class API(base.Base): instance = db.api.instance_get(context, instance_id) name = name or instance["display_name"] - invalid_rebuild_states = [ - vm_states.BUILDING, - vm_states.REBUILDING, - ] - - if instance["vm_state"] in invalid_rebuild_states: - msg = _("Instance already building") - raise exception.BuildInProgress(msg) + if instance["vm_state"] != vm_states.ACTIVE: + msg = _("Instance must be active to rebuild.") + raise exception.RebuildRequiresActiveInstance(msg) files_to_inject = files_to_inject or [] metadata = metadata or {} diff --git a/nova/exception.py b/nova/exception.py index 66740019b..889d36c96 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -61,7 +61,7 @@ class ApiError(Error): super(ApiError, self).__init__(outstr) -class BuildInProgress(Error): +class 
RebuildRequiresActiveInstance(Error): pass -- cgit From 1c7002db8be430cded6efb7378103e17c8df21b4 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 13:24:37 -0400 Subject: Fixed issue where we were setting the state to DELETED before it's actually deleted. --- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7697e8e6c..924799dc4 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -490,15 +490,15 @@ class ComputeManager(manager.SchedulerDependentManager): def terminate_instance(self, context, instance_id): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') - instance = self.db.instance_get(context.elevated(), instance_id) + self.db.instance_destroy(context, instance_id) + self._instance_update(context, instance_id, vm_state=vm_states.DELETED, task_state=None, terminated_at=utils.utcnow()) - # TODO(ja): should we keep it in a terminated state for a bit? - self.db.instance_destroy(context, instance_id) + instance = self.db.instance_get(context.elevated(), instance_id) usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.delete', -- cgit From a6bf7c0b2522509dda8dd5e537fad49665aa2af2 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 13:27:23 -0400 Subject: Added DELETED status to OSAPI just in case. --- nova/api/openstack/common.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 90b2095b8..07c6fbd11 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -76,6 +76,9 @@ _STATE_MAP = { vm_states.ERROR: { 'default': 'ERROR', }, + vm_states.DELETED: { + 'default': 'DELETED', + }, } -- cgit From 0343a328e66557abda9d0817558ad09a73962eb9 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 24 Aug 2011 14:39:47 -0700 Subject: change snapshot msg too --- nova/api/ec2/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 363dad7cd..1e176e52d 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -405,7 +405,7 @@ class Executor(wsgi.Application): LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex), context=context) ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id']) - message = _('Snapshot %s not found') % ec2_id + message = ex.message % {'snapshot_id': ec2_id} return self._error(req, context, type(ex).__name__, message) except exception.NotFound as ex: LOG.info(_('NotFound raised: %s'), unicode(ex), context=context) -- cgit From 575f72693fa20c7c4157c8ce9702751cd54f1a82 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 18:00:03 -0400 Subject: Fixed silly ordering issue which was causing tons of test failures. 
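Note: the usage notification emitted on delete is built from the instance row, so the row must be read before instance_destroy() removes it; the previous ordering fetched it afterwards. The corrected sequence, summarized from the hunk below (names from the surrounding code):

    # 1. Tear down the hypervisor resources.
    self._shutdown_instance(context, instance_id, 'Terminating')
    # 2. Capture the row while it still exists (needed for usage_info).
    instance = self.db.instance_get(context.elevated(), instance_id)
    # 3. Record the terminal state, then remove the row.
    self._instance_update(context, instance_id,
                          vm_state=vm_states.DELETED,
                          task_state=None,
                          terminated_at=utils.utcnow())
    self.db.instance_destroy(context, instance_id)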
--- nova/compute/manager.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 924799dc4..c29eef07f 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -490,15 +490,15 @@ class ComputeManager(manager.SchedulerDependentManager): def terminate_instance(self, context, instance_id): """Terminate an instance on this host.""" self._shutdown_instance(context, instance_id, 'Terminating') - self.db.instance_destroy(context, instance_id) - + instance = self.db.instance_get(context.elevated(), instance_id) self._instance_update(context, instance_id, vm_state=vm_states.DELETED, task_state=None, terminated_at=utils.utcnow()) - instance = self.db.instance_get(context.elevated(), instance_id) + self.db.instance_destroy(context, instance_id) + usage_info = utils.usage_from_instance(instance) notifier.notify('compute.%s' % self.host, 'compute.instance.delete', -- cgit From 5f1380bfc69913f6aeb2a64e3501f77973493bc3 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 18:14:13 -0400 Subject: Added fix for parallel build test. --- nova/tests/api/openstack/test_server_actions.py | 2 +- run_tests.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 0cbbe271d..eae74e75e 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -35,7 +35,7 @@ def return_server_with_attributes(**kwargs): return _return_server -def return_server_with_state(vm_state, task_state=None) +def return_server_with_state(vm_state, task_state=None): return return_server_with_attributes(vm_state=vm_state, task_state=task_state) diff --git a/run_tests.py b/run_tests.py index fd836967e..b9a74769e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -55,6 +55,7 @@ To run a single test module: """ +import eventlet import gettext import heapq import os @@ -62,6 +63,7 @@ import unittest import sys import time +eventlet.monkey_patch() gettext.install('nova', unicode=1) from nose import config -- cgit From 6c4329f846685ee54c5265e5cc56e58e6fbd55e9 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 24 Aug 2011 18:25:21 -0400 Subject: stub_instance fix from merge conflict --- nova/tests/api/openstack/test_server_actions.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index eae74e75e..6fb21fad3 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -46,7 +46,8 @@ def return_server_with_uuid_and_state(vm_state, task_state=None): return _return_server -def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", name=None): +def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", + name=None, vm_state=None, task_state=None): if metadata is not None: metadata_items = [{'key':k, 'value':v} for k, v in metadata.items()] else: @@ -67,8 +68,8 @@ def stub_instance(id, metadata=None, image_ref="10", flavor_id="1", name=None): "launch_index": 0, "key_name": "", "key_data": "", - "vm_state": vm_states.ACTIVE, - "task_state": None, + "vm_state": vm_state or vm_states.ACTIVE, + "task_state": task_state, "memory_mb": 0, "vcpus": 0, "local_gb": 0, -- cgit From 48cd9689de31e408c792052747f714a9dbe1f8f7 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Wed, 24 Aug 2011 15:51:29 -0700 
Subject: added virtio flag; associate address for VSA; cosmetic changes. Prior to volume_types merge --- bin/nova-manage | 4 -- bin/nova-vsa | 1 - nova/api/openstack/contrib/drive_types.py | 1 - .../openstack/contrib/virtual_storage_arrays.py | 49 +++++++++++++++++- .../migrate_repo/versions/037_add_vsa_data.py | 1 - nova/db/sqlalchemy/session.py | 2 - nova/network/linux_net.py | 1 + nova/scheduler/vsa.py | 1 - nova/tests/test_drive_types.py | 59 +++++++++++----------- nova/tests/test_vsa.py | 2 +- nova/virt/libvirt.xml.template | 4 +- nova/virt/libvirt/connection.py | 4 ++ nova/vsa/__init__.py | 1 - nova/vsa/api.py | 7 ++- nova/vsa/connection.py | 1 - nova/vsa/drive_types.py | 1 - nova/vsa/fake.py | 1 - nova/vsa/manager.py | 1 - 18 files changed, 88 insertions(+), 53 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index 18a008d8c..d7636b811 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -64,9 +64,6 @@ import time from optparse import OptionParser - -import tempfile -import zipfile import ast # If ../nova/__init__.py exists, add ../ to Python search path, so that @@ -91,7 +88,6 @@ from nova import rpc from nova import utils from nova import version from nova.api.ec2 import ec2utils -from nova.api.ec2 import cloud from nova.auth import manager from nova.cloudpipe import pipelib from nova.compute import instance_types diff --git a/bin/nova-vsa b/bin/nova-vsa index 07f998117..d765e8f9e 100755 --- a/bin/nova-vsa +++ b/bin/nova-vsa @@ -3,7 +3,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py index f2cbd3715..1aa65374f 100644 --- a/nova/api/openstack/contrib/drive_types.py +++ b/nova/api/openstack/contrib/drive_types.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index d6c4a5ef4..81dbc9e1f 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -24,6 +23,7 @@ from webob import exc from nova import vsa from nova import volume from nova import compute +from nova import network from nova import db from nova import quota from nova import exception @@ -103,6 +103,7 @@ class VsaController(object): def __init__(self): self.vsa_api = vsa.API() self.compute_api = compute.API() + self.network_api = network.API() super(VsaController, self).__init__() def _items(self, req, details): @@ -186,6 +187,48 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) + def associate_address(self, req, id, body): + """ /zadr-vsa/{vsa_id}/associate_address + auto or manually associate an IP to VSA + """ + context = req.environ['nova.context'] + + if body is None: + ip = 'auto' + else: + ip = body.get('ipAddress', 'auto') + + LOG.audit(_("Associate address %(ip)s to VSA %(id)s"), + locals(), context=context) + + try: + instances = self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(id))}) + + if instances is None or len(instances)==0: + return faults.Fault(exc.HTTPNotFound()) + + for instance in instances: + self.network_api.allocate_for_instance(context, instance, vpn=False) + return + + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + def disassociate_address(self, req, id, body): + """ /zadr-vsa/{vsa_id}/disassociate_address + auto or manually associate an IP to VSA + """ + context = req.environ['nova.context'] + + if body is None: + ip = 'auto' + else: + ip = body.get('ipAddress', 'auto') + + LOG.audit(_("Disassociate address from VSA %(id)s"), + locals(), context=context) + class VsaVolumeDriveController(volumes.VolumeController): """The base class for VSA volumes & drives. @@ -515,7 +558,9 @@ class Virtual_storage_arrays(extensions.ExtensionDescriptor): VsaController(), collection_actions={'detail': 'GET'}, member_actions={'add_capacity': 'POST', - 'remove_capacity': 'POST'}) + 'remove_capacity': 'POST', + 'associate_address': 'POST', + 'disassociate_address': 'POST'}) resources.append(res) res = extensions.ResourceExtension('volumes', diff --git a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py index 5a80f4e7a..8a57bd234 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/037_add_vsa_data.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 07f281938..c678cb543 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -30,11 +30,9 @@ import nova.exception import nova.flags import nova.log - FLAGS = nova.flags.FLAGS LOG = nova.log.getLogger("nova.db.sqlalchemy") - try: import MySQLdb except ImportError: diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 57c1d0c28..3de605ae2 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -508,6 +508,7 @@ def get_dhcp_hosts(context, network_ref): if network_ref['multi_host'] and FLAGS.host != host: continue hosts.append(_host_dhcp(fixed_ref)) + return '\n'.join(hosts) diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 10c9b5a02..218ad5c7b 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py index e91c41321..b52e6705b 100644 --- a/nova/tests/test_drive_types.py +++ b/nova/tests/test_drive_types.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -28,21 +27,21 @@ from nova import test from nova.vsa import drive_types FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.vsa') +LOG = logging.getLogger('nova.tests.test_drive_types') class DriveTypesTestCase(test.TestCase): """Test cases for driver types code""" def setUp(self): super(DriveTypesTestCase, self).setUp() - self.cntx = context.RequestContext(None, None) - self.cntx_admin = context.get_admin_context() - self._dtype = self._create_drive_type() + self.ctxt = context.RequestContext(None, None) + self.ctxt_admin = context.get_admin_context() + self._dtype = self._create_default_drive_type() def tearDown(self): self._dtype = None - def _create_drive_type(self): + def _create_default_drive_type(self): """Create a volume object.""" dtype = {} dtype['type'] = 'SATA' @@ -51,97 +50,97 @@ class DriveTypesTestCase(test.TestCase): dtype['capabilities'] = None dtype['visible'] = True - LOG.debug(_("Drive Type created %s"), dtype) + LOG.debug(_("Default values for Drive Type: %s"), dtype) return dtype def test_drive_type_create_delete(self): dtype = self._dtype - prev_all_dtypes = drive_types.get_all(self.cntx_admin, False) + prev_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) for k, v in dtype.iteritems(): self.assertEqual(v, new[k], 'one of fields doesnt match') - new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) self.assertNotEqual(len(prev_all_dtypes), len(new_all_dtypes), 'drive type was not created') - drive_types.delete(self.cntx_admin, new['id']) - new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + drive_types.delete(self.ctxt_admin, new['id']) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) self.assertEqual(prev_all_dtypes, new_all_dtypes, 'drive types was not deleted') def 
test_drive_type_check_name_generation(self): dtype = self._dtype - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) expected_name = FLAGS.drive_type_template_short % \ (dtype['type'], dtype['size_gb'], dtype['rpm']) self.assertEqual(new['name'], expected_name, 'name was not generated correctly') dtype['capabilities'] = 'SEC' - new2 = drive_types.create(self.cntx_admin, **dtype) + new2 = drive_types.create(self.ctxt_admin, **dtype) expected_name = FLAGS.drive_type_template_long % \ (dtype['type'], dtype['size_gb'], dtype['rpm'], dtype['capabilities']) self.assertEqual(new2['name'], expected_name, 'name was not generated correctly') - drive_types.delete(self.cntx_admin, new['id']) - drive_types.delete(self.cntx_admin, new2['id']) + drive_types.delete(self.ctxt_admin, new['id']) + drive_types.delete(self.ctxt_admin, new2['id']) def test_drive_type_create_delete_invisible(self): dtype = self._dtype dtype['visible'] = False - prev_all_dtypes = drive_types.get_all(self.cntx_admin, True) - new = drive_types.create(self.cntx_admin, **dtype) + prev_all_dtypes = drive_types.get_all(self.ctxt_admin, True) + new = drive_types.create(self.ctxt_admin, **dtype) - new_all_dtypes = drive_types.get_all(self.cntx_admin, True) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, True) self.assertEqual(prev_all_dtypes, new_all_dtypes) - new_all_dtypes = drive_types.get_all(self.cntx_admin, False) + new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) self.assertNotEqual(prev_all_dtypes, new_all_dtypes) - drive_types.delete(self.cntx_admin, new['id']) + drive_types.delete(self.ctxt_admin, new['id']) def test_drive_type_rename_update(self): dtype = self._dtype dtype['capabilities'] = None - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) for k, v in dtype.iteritems(): self.assertEqual(v, new[k], 'one of fields doesnt match') new_name = 'NEW_DRIVE_NAME' - new = drive_types.rename(self.cntx_admin, new['name'], new_name) + new = drive_types.rename(self.ctxt_admin, new['name'], new_name) self.assertEqual(new['name'], new_name) - new = drive_types.rename(self.cntx_admin, new_name) + new = drive_types.rename(self.ctxt_admin, new_name) expected_name = FLAGS.drive_type_template_short % \ (dtype['type'], dtype['size_gb'], dtype['rpm']) self.assertEqual(new['name'], expected_name) changes = {'rpm': 7200} - new = drive_types.update(self.cntx_admin, new['id'], **changes) + new = drive_types.update(self.ctxt_admin, new['id'], **changes) for k, v in changes.iteritems(): self.assertEqual(v, new[k], 'one of fields doesnt match') - drive_types.delete(self.cntx_admin, new['id']) + drive_types.delete(self.ctxt_admin, new['id']) def test_drive_type_get(self): dtype = self._dtype - new = drive_types.create(self.cntx_admin, **dtype) + new = drive_types.create(self.ctxt_admin, **dtype) - new2 = drive_types.get(self.cntx_admin, new['id']) + new2 = drive_types.get(self.ctxt_admin, new['id']) for k, v in new2.iteritems(): self.assertEqual(str(new[k]), str(new2[k]), 'one of fields doesnt match') - new2 = drive_types.get_by_name(self.cntx_admin, new['name']) + new2 = drive_types.get_by_name(self.ctxt_admin, new['name']) for k, v in new.iteritems(): self.assertEqual(str(new[k]), str(new2[k]), 'one of fields doesnt match') - drive_types.delete(self.cntx_admin, new['id']) + drive_types.delete(self.ctxt_admin, new['id']) diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index cff23a800..726939744 100644 --- 
a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -113,7 +113,7 @@ class VsaTestCase(test.TestCase): self.assertRaises(exception.ApiError, self.vsa_api.create, self.context, **param) vsa_list2 = self.vsa_api.get_all(self.context) - self.assertEqual(len(vsa_list2), len(vsa_list1) + 1) + self.assertEqual(len(vsa_list2), len(vsa_list1)) param = {'storage': [{'drive_name': 'wrong name'}]} self.assertRaises(exception.ApiError, diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index 210e2b0fb..0b241120b 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -128,7 +128,9 @@ - +#if $getVar('use_virtio_for_bridges', True) + +#end if diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index e8a657bac..fb16aa57d 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -130,6 +130,9 @@ flags.DEFINE_string('libvirt_vif_type', 'bridge', flags.DEFINE_string('libvirt_vif_driver', 'nova.virt.libvirt.vif.LibvirtBridgeDriver', 'The libvirt VIF driver to configure the VIFs.') +flags.DEFINE_bool('libvirt_use_virtio_for_bridges', + False, + 'Use virtio for bridge interfaces') def get_connection(read_only): @@ -1047,6 +1050,7 @@ class LibvirtConnection(driver.ComputeDriver): 'ebs_root': ebs_root, 'local_device': local_device, 'volumes': block_device_mapping, + 'use_virtio_for_bridges': FLAGS.libvirt_use_virtio_for_bridges, 'ephemerals': ephemerals} root_device_name = driver.block_device_info_get_root(block_device_info) diff --git a/nova/vsa/__init__.py b/nova/vsa/__init__.py index 779b7fb65..09162e006 100644 --- a/nova/vsa/__init__.py +++ b/nova/vsa/__init__.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/api.py b/nova/vsa/api.py index 19185b907..bb6e93b87 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -194,9 +193,9 @@ class API(base.Base): volume_params = self._check_storage_parameters(context, vsa_name, storage, shared) except exception.ApiError: - self.update_vsa_status(context, vsa_id, - status=VsaState.FAILED) - raise + self.db.vsa_destroy(context, vsa_id) + raise exception.ApiError(_("Error in storage parameters: %s") + % storage) # after creating DB entry, re-check and set some defaults updates = {} diff --git a/nova/vsa/connection.py b/nova/vsa/connection.py index 5de8021a7..8ac8a1dd5 100644 --- a/nova/vsa/connection.py +++ b/nova/vsa/connection.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py index 3c67fdbb9..3cdbbfb09 100644 --- a/nova/vsa/drive_types.py +++ b/nova/vsa/drive_types.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py index d96138255..0bb81484d 100644 --- a/nova/vsa/fake.py +++ b/nova/vsa/fake.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index d98d0fcb2..0f1718d38 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -2,7 +2,6 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. -# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain -- cgit From f0fcc4ba61b4658b1e28bd69cfcf395cc408496a Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 09:04:04 -0400 Subject: Another attempt at fixing hanging test. --- nova/tests/test_xenapi.py | 2 ++ run_tests.py | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..061e9ffea 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -24,6 +24,8 @@ import re import stubout import ast +eventlet.monkey_patch() + from nova import db from nova import context from nova import flags diff --git a/run_tests.py b/run_tests.py index b9a74769e..fd836967e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -55,7 +55,6 @@ To run a single test module: """ -import eventlet import gettext import heapq import os @@ -63,7 +62,6 @@ import unittest import sys import time -eventlet.monkey_patch() gettext.install('nova', unicode=1) from nose import config -- cgit From 881fb85c9a74fc3436d07d3cf3876c2f815b5618 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 10:59:04 -0400 Subject: Set state to RESIZING during resizing... --- nova/compute/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 960d28bd0..89e44258c 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1165,7 +1165,7 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.ACTIVE, + vm_state=vm_states.RESIZING, task_state=task_states.RESIZE_PREP) instance_ref = self._get_instance(context, instance_id, 'resize') -- cgit From 6e14007c09a465374d1b50cd00549c1be6dc536c Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:11:51 -0400 Subject: Removed RESIZE-CONFIRM hack. 
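Note: eventlet.monkey_patch() swaps the blocking stdlib primitives (socket, thread, time) for cooperative ones, but any code that grabbed references to the originals before the patch ran keeps the blocking versions -- hence moving the call to the very top of the affected test module rather than relying on run_tests.py. The ordering requirement, as a sketch:

    import eventlet
    eventlet.monkey_patch()  # must run before anything binds the
                             # unpatched primitives

    # Imports from here on see the green (non-blocking) versions.
    import socket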
--- nova/api/openstack/views/servers.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 8f8d04ca0..b0daeb7a8 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -21,8 +21,6 @@ import hashlib import os from nova import exception -import nova.compute -import nova.context from nova.api.openstack import common from nova.api.openstack.views import addresses as addresses_view from nova.api.openstack.views import flavors as flavors_view @@ -70,12 +68,6 @@ class ViewBuilder(object): 'name': inst['display_name'], 'status': common.status_from_state(vm_state, task_state)} - ctxt = nova.context.get_admin_context() - compute_api = nova.compute.API() - - if compute_api.has_finished_migration(ctxt, inst['uuid']): - inst_dict['status'] = 'RESIZE-CONFIRM' - # Return the metadata as a dictionary metadata = {} for item in inst.get('metadata', []): -- cgit From 0e3986e71f4bbc848e81f18d6c3e6ad33ab3684c Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:12:53 -0400 Subject: Removed invalid test. --- nova/tests/api/openstack/test_server_actions.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 6fb21fad3..b9ef41465 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -244,19 +244,6 @@ class ServerActionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 500) - def test_resized_server_has_correct_status(self): - req = self.webreq('/1', 'GET') - - def fake_migration_get(*args): - return {} - - self.stubs.Set(nova.db, 'migration_get_by_instance_and_status', - fake_migration_get) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - body = json.loads(res.body) - self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM') - def test_confirm_resize_server(self): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) -- cgit From 423a29ff347d3911ba1a98aa224e2a29bdbb8d4c Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:17:31 -0400 Subject: Set error state when migration prep fails. --- nova/compute/manager.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index c29eef07f..de43a5ced 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -929,8 +929,11 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get_by_uuid(context, instance_id) if instance_ref['host'] == FLAGS.host: - raise exception.Error(_( - 'Migration error: destination same as source!')) + self._instance_update(context, + instance_id, + vm_state=vm_states.ERROR) + msg = _('Migration error: destination same as source!') + raise exception.Error(msg) old_instance_type = self.db.instance_type_get(context, instance_ref['instance_type_id']) -- cgit From a14e2b1d8cbc87d5bcb31b9127035160fde4acc5 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 11:44:28 -0400 Subject: Verify resize needs to be set. 
--- nova/compute/api.py | 8 ++++---- nova/compute/manager.py | 5 +++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 89e44258c..47ad04930 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1089,8 +1089,8 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.VERIFY_RESIZE, - task_state=task_states.RESIZE_REVERTING) + vm_state=vm_states.ACTIVE, + task_state=None) params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, @@ -1115,8 +1115,8 @@ class API(base.Base): self.update(context, instance_id, - vm_state=vm_states.VERIFY_RESIZE, - task_state=task_states.RESIZE_CONFIRMING) + vm_state=vm_states.ACTIVE, + task_state=None) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index de43a5ced..5427b896e 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1027,6 +1027,11 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.finish_migration(context, instance_ref, disk_info, network_info, resize_instance) + self._instance_update(context, + instance_id, + vm_state=vm_states.VERIFY_RESIZE, + task_state=None) + self.db.migration_update(context, migration_id, {'status': 'finished', }) -- cgit From 6758779249d490fd21bfdeae6d40adfc33d8cd17 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 12:50:36 -0400 Subject: Reverted two mistakes when looking over full diff. --- nova/api/ec2/cloud.py | 10 ++++------ nova/compute/manager.py | 2 -- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index c5a360426..cf9437b08 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -104,12 +104,10 @@ def state_description_from_vm_state(vm_state): # TODO(yamahata): hypervisor dependent default device name _DEFAULT_ROOT_DEVICE_NAME = '/dev/sda1' -_DEFAULT_MAPPINGS = { - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': _DEFAULT_ROOT_DEVICE_NAME, - 'swap': 'sda3', -} +_DEFAULT_MAPPINGS = {'ami': 'sda1', + 'ephemeral0': 'sda2', + 'root': _DEFAULT_ROOT_DEVICE_NAME, + 'swap': 'sda3'} def _parse_block_device_mapping(bdm): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 5427b896e..a3ced1279 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -547,8 +547,6 @@ class ComputeManager(manager.SchedulerDependentManager): vm_state=vm_states.REBUILDING, task_state=task_states.BLOCK_DEVICE_MAPPING) - bd_mapping = self._setup_block_device_mapping(context, instance_id) - image_ref = kwargs.get('image_ref') instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) -- cgit From d8d4aff908925b2f351e77291f4a8f394994063d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 16:38:38 -0400 Subject: Review feedback. 
--- nova/api/ec2/cloud.py | 8 +++++--- nova/api/openstack/common.py | 7 ++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index cf9437b08..ac247a0ef 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -79,7 +79,9 @@ def _gen_key(context, user_id, key_name): return {'private_key': private_key, 'fingerprint': fingerprint} -# EC2 API: Valid Values: +# EC2 API can return the following values as documented in the EC2 API +# http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/ +# ApiReference-ItemType-InstanceStateType.html # pending | running | shutting-down | terminated | stopping | stopped _STATE_DESCRIPTION_MAP = { None: 'pending', @@ -1065,8 +1067,8 @@ class CloudController(object): result): vm_state = instance['vm_state'] state_to_value = { - vm_states.STOPPED: 'stop', - vm_states.DELETED: 'terminate', + vm_states.STOPPED: 'stopped', + vm_states.DELETED: 'terminated', } value = state_to_value.get(vm_state) if value: diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 07c6fbd11..bdbae0271 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -84,10 +84,11 @@ _STATE_MAP = { def status_from_state(vm_state, task_state='default'): """Given vm_state and task_state, return a status string.""" - LOG.debug("Generating status for vm_state=%(vm_state)s " - "task_state=%(task_state)s." % locals()) task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN_STATE')) - return task_map.get(task_state, task_map['default']) + status = task_map.get(task_state, task_map['default']) + LOG.debug("Generated %(status)s from vm_state=%(vm_state)s " + "task_state=%(task_state)s." % locals()) + return status def vm_state_from_status(status): -- cgit From ae1ac682673648f2a2f364eabd525985f3d16a9d Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 16:40:15 -0400 Subject: Bumped migration number. --- .../versions/042_update_instance_states.py | 141 --------------------- .../versions/043_update_instance_states.py | 141 +++++++++++++++++++++ 2 files changed, 141 insertions(+), 141 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py deleted file mode 100644 index 65bdf601d..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_update_instance_states.py +++ /dev/null @@ -1,141 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import sqlalchemy -from sqlalchemy import MetaData, Table, Column, String - -from nova.compute import task_states -from nova.compute import vm_states - - -meta = MetaData() - - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -_upgrade_translations = { - "stopping": { - "vm_state": vm_states.ACTIVE, - "task_state": task_states.STOPPING, - }, - "stopped": { - "vm_state": vm_states.STOPPED, - "task_state": None, - }, - "terminated": { - "vm_state": vm_states.DELETED, - "task_state": None, - }, - "terminating": { - "vm_state": vm_states.ACTIVE, - "task_state": task_states.DELETING, - }, - "running": { - "vm_state": vm_states.ACTIVE, - "task_state": None, - }, - "scheduling": { - "vm_state": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, - "migrating": { - "vm_state": vm_states.MIGRATING, - "task_state": None, - }, - "pending": { - "vm_state": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, -} - - -_downgrade_translations = { - vm_states.ACTIVE: { - None: "running", - task_states.DELETING: "terminating", - task_states.STOPPING: "stopping", - }, - vm_states.BUILDING: { - None: "pending", - task_states.SCHEDULING: "scheduling", - }, - vm_states.STOPPED: { - None: "stopped", - }, - vm_states.REBUILDING: { - None: "pending", - }, - vm_states.DELETED: { - None: "terminated", - }, - vm_states.MIGRATING: { - None: "migrating", - }, -} - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instance_table.c.state - c_state.alter(name='power_state') - - c_vm_state = instance_table.c.state_description - c_vm_state.alter(name='vm_state') - - instance_table.create_column(c_task_state) - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - for old_state, values in _upgrade_translations.iteritems(): - instance_table.update().\ - values(**values).\ - where(c_vm_state == old_state).\ - execute() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_task_state = instance_table.c.task_state - - c_state = instance_table.c.power_state - c_state.alter(name='state') - - c_vm_state = instance_table.c.vm_state - c_vm_state.alter(name='state_description') - - for old_vm_state, old_task_states in _downgrade_translations.iteritems(): - for old_task_state, new_state_desc in old_task_states.iteritems(): - instance_table.update().\ - where(c_task_state == old_task_state).\ - where(c_vm_state == old_vm_state).\ - values(state_description=new_state_desc).\ - execute() - - instance_table.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py new file mode 100644 index 000000000..65bdf601d --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py @@ -0,0 +1,141 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy +from sqlalchemy import MetaData, Table, Column, String + +from nova.compute import task_states +from nova.compute import vm_states + + +meta = MetaData() + + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +_upgrade_translations = { + "stopping": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.STOPPING, + }, + "stopped": { + "vm_state": vm_states.STOPPED, + "task_state": None, + }, + "terminated": { + "vm_state": vm_states.DELETED, + "task_state": None, + }, + "terminating": { + "vm_state": vm_states.ACTIVE, + "task_state": task_states.DELETING, + }, + "running": { + "vm_state": vm_states.ACTIVE, + "task_state": None, + }, + "scheduling": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, + "migrating": { + "vm_state": vm_states.MIGRATING, + "task_state": None, + }, + "pending": { + "vm_state": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, +} + + +_downgrade_translations = { + vm_states.ACTIVE: { + None: "running", + task_states.DELETING: "terminating", + task_states.STOPPING: "stopping", + }, + vm_states.BUILDING: { + None: "pending", + task_states.SCHEDULING: "scheduling", + }, + vm_states.STOPPED: { + None: "stopped", + }, + vm_states.REBUILDING: { + None: "pending", + }, + vm_states.DELETED: { + None: "terminated", + }, + vm_states.MIGRATING: { + None: "migrating", + }, +} + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instance_table.c.state + c_state.alter(name='power_state') + + c_vm_state = instance_table.c.state_description + c_vm_state.alter(name='vm_state') + + instance_table.create_column(c_task_state) + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + for old_state, values in _upgrade_translations.iteritems(): + instance_table.update().\ + values(**values).\ + where(c_vm_state == old_state).\ + execute() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_task_state = instance_table.c.task_state + + c_state = instance_table.c.power_state + c_state.alter(name='state') + + c_vm_state = instance_table.c.vm_state + c_vm_state.alter(name='state_description') + + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + instance_table.update().\ + where(c_task_state == old_task_state).\ + where(c_vm_state == old_vm_state).\ + values(state_description=new_state_desc).\ + execute() + + instance_table.drop_column('task_state') -- cgit From c316782f8879ef321c4545b04bc9d24e11bb4ee6 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 17:27:10 -0400 Subject: review feedback --- nova/api/ec2/cloud.py | 3 +-- nova/api/openstack/common.py | 7 ++----- nova/compute/manager.py | 12 ++++++------ 
nova/compute/task_states.py | 21 +++++++++++++++------ nova/compute/vm_states.py | 9 +++++++-- nova/db/sqlalchemy/api.py | 40 ---------------------------------------- 6 files changed, 31 insertions(+), 61 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index ac247a0ef..fe44191c8 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -92,7 +92,6 @@ _STATE_DESCRIPTION_MAP = { vm_states.STOPPED: 'stopped', vm_states.MIGRATING: 'migrate', vm_states.RESIZING: 'resize', - vm_states.VERIFY_RESIZE: 'verify_resize', vm_states.PAUSED: 'pause', vm_states.SUSPENDED: 'suspend', vm_states.RESCUED: 'rescue', @@ -101,7 +100,7 @@ _STATE_DESCRIPTION_MAP = { def state_description_from_vm_state(vm_state): """Map the vm state to the server status string""" - return _STATE_DESCRIPTION_MAP[vm_state] + return _STATE_DESCRIPTION_MAP.get(vm_state, vm_state) # TODO(yamahata): hypervisor dependent default device name diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index bdbae0271..d743a66ef 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -43,8 +43,8 @@ _STATE_MAP = { vm_states.ACTIVE: { 'default': 'ACTIVE', task_states.REBOOTING: 'REBOOT', - task_states.HARD_REBOOTING: 'HARD_REBOOT', - task_states.PASSWORD: 'PASSWORD', + task_states.UPDATING_PASSWORD: 'PASSWORD', + task_states.RESIZE_VERIFY: 'VERIFY_RESIZE', }, vm_states.BUILDING: { 'default': 'BUILD', @@ -61,9 +61,6 @@ _STATE_MAP = { vm_states.RESIZING: { 'default': 'RESIZE', }, - vm_states.VERIFY_RESIZE: { - 'default': 'VERIFY_RESIZE', - }, vm_states.PAUSED: { 'default': 'PAUSED', }, diff --git a/nova/compute/manager.py b/nova/compute/manager.py index a3ced1279..b4c6abae0 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -415,7 +415,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.BUILDING, - task_state=task_states.SPAWN) + task_state=task_states.SPAWNING) # TODO(vish) check to make sure the availability zone matches try: @@ -557,7 +557,7 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, vm_state=vm_states.REBUILDING, - task_state=task_states.SPAWN) + task_state=task_states.SPAWNING) # pull in new password here since the original password isn't in the db instance_ref.admin_pass = kwargs.get('new_pass', @@ -629,9 +629,9 @@ class ComputeManager(manager.SchedulerDependentManager): None if rotation shouldn't be used (as in the case of snapshots) """ if image_type == "snapshot": - task_state = task_states.SNAPSHOTTING + task_state = task_states.IMAGE_SNAPSHOT elif image_type == "backup": - task_state = task_states.BACKING_UP + task_state = task_states.IMAGE_BACKUP else: raise Exception(_('Image type not recognized %s') % image_type) @@ -1027,8 +1027,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._instance_update(context, instance_id, - vm_state=vm_states.VERIFY_RESIZE, - task_state=None) + vm_state=vm_states.ACTIVE, + task_state=task_states.RESIZE_VERIFY) self.db.migration_update(context, migration_id, {'status': 'finished', }) diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py index 5f78495ea..e3315a542 100644 --- a/nova/compute/task_states.py +++ b/nova/compute/task_states.py @@ -15,16 +15,25 @@ # License for the specific language governing permissions and limitations # under the License. -"""Possible task states for instances""" +"""Possible task states for instances. 
+ +Compute instance task states represent what is happening to the instance at the +current moment. These tasks can be generic, such as 'spawning', or specific, +such as 'block_device_mapping'. These task states allow for a better view into +what an instance is doing and should be displayed to users/administrators as +necessary. + +""" SCHEDULING = 'scheduling' BLOCK_DEVICE_MAPPING = 'block_device_mapping' NETWORKING = 'networking' -SPAWN = 'spawn' +SPAWNING = 'spawning' + +IMAGE_SNAPSHOT = 'image_snapshot' +IMAGE_BACKUP = 'image_backup' -SNAPSHOTTING = 'snapshotting' -BACKING_UP = 'backing_up' -PASSWORD = 'password' +UPDATING_PASSWORD = 'updating_password' RESIZE_PREP = 'resize_prep' RESIZE_MIGRATING = 'resize_migrating' @@ -32,11 +41,11 @@ RESIZE_MIGRATED = 'resize_migrated' RESIZE_FINISH = 'resize_finish' RESIZE_REVERTING = 'resize_reverting' RESIZE_CONFIRMING = 'resize_confirming' +RESIZE_VERIFY = 'resize_verify' REBUILDING = 'rebuilding' REBOOTING = 'rebooting' -HARD_REBOOTING = 'hard_rebooting' PAUSING = 'pausing' UNPAUSING = 'unpausing' SUSPENDING = 'suspending' diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py index 560e6d688..6f16c1f09 100644 --- a/nova/compute/vm_states.py +++ b/nova/compute/vm_states.py @@ -15,7 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -"""Possible vm states for instances""" +"""Possible vm states for instances. + +Compute instance vm states represent the state of an instance as it pertains to +a user or administrator. When combined with task states (task_states.py), a +better picture can be formed regarding the instance's health. + +""" ACTIVE = 'active' BUILDING = 'building' @@ -29,6 +35,5 @@ STOPPED = 'stopped' MIGRATING = 'migrating' RESIZING = 'resizing' -VERIFY_RESIZE = 'verify_resize' ERROR = 'error' diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 08fc81759..7b78e286d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1484,46 +1484,6 @@ def instance_get_floating_address(context, instance_id): return fixed_ip_refs[0].floating_ips[0]['address'] -@require_admin_context -def instance_set_power_state(context, instance_id, power_state): - session = get_session() - partial = session.query(models.Instance) - - if utils.is_uuid_like(instance_id): - result = partial.filter_by(uuid=instance_id) - else: - result = partial.filter_by(id=instance_id) - - result.update({'power_state': power_state}) - - -@require_admin_context -def instance_set_vm_state(context, instance_id, vm_state): - # vm_state = running, halted, suspended, paused - session = get_session() - partial = session.query(models.Instance) - - if utils.is_uuid_like(instance_id): - result = partial.filter_by(uuid=instance_id) - else: - result = partial.filter_by(id=instance_id) - - result.update({'vm_state': vm_state}) - - -def instance_set_task_state(context, instance_id, task_state): - # task_state = running, halted, suspended, paused - session = get_session() - partial = session.query(models.Instance) - - if utils.is_uuid_like(instance_id): - result = partial.filter_by(uuid=instance_id) - else: - result = partial.filter_by(id=instance_id) - - result.update({'task_state': task_state}) - - @require_context def instance_update(context, instance_id, values): session = get_session() -- cgit From 2cf0b67e08e1608bd717ffadd41d5029db2b4a3a Mon Sep 17 00:00:00 2001 From: "Kevin L. 
Mitchell" Date: Thu, 25 Aug 2011 21:56:45 +0000 Subject: Fix glance image authorization check now that glance can do authorization checks on its own; use correct image service when looking for ramdisk, etc.; fix a couple of PEP8 errors --- nova/api/openstack/create_instance_helper.py | 6 +++--- nova/image/glance.py | 14 ++++++++++++++ nova/ipv6/account_identifier.py | 3 ++- nova/tests/test_ipv6.py | 2 +- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 483ff4985..c428a8209 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -98,7 +98,7 @@ class CreateInstanceHelper(object): try: image_service, image_id = nova.image.get_image_service(image_href) kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_id) + req, image_service, image_id) images = set([str(x['id']) for x in image_service.index(context)]) assert str(image_id) in images except Exception, e: @@ -248,12 +248,12 @@ class CreateInstanceHelper(object): msg = _("Server name is an empty string") raise exc.HTTPBadRequest(explanation=msg) - def _get_kernel_ramdisk_from_image(self, req, image_id): + def _get_kernel_ramdisk_from_image(self, req, image_service, image_id): """Fetch an image from the ImageService, then if present, return the associated kernel and ramdisk image IDs. """ context = req.environ['nova.context'] - image_meta = self._image_service.show(context, image_id) + image_meta = image_service.show(context, image_id) # NOTE(sirp): extracted to a separate method to aid unit-testing, the # new method doesn't need a request obj or an ImageService stub kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( diff --git a/nova/image/glance.py b/nova/image/glance.py index 9060f6a91..16f803218 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -269,6 +269,20 @@ class GlanceImageService(service.BaseImageService): image_meta = _convert_from_string(image_meta) return image_meta + @staticmethod + def _is_image_available(context, image_meta): + """Check image availability. + + Under Glance, images are always available if the context has + an auth_token. Otherwise, we fall back to the superclass + method. 
+ + """ + if hasattr(context, 'auth_token') and context.auth_token: + return True + return service.BaseImageService._is_image_available(context, + image_meta) + # utility functions def _convert_timestamps_to_datetimes(image_meta): diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 27bb01988..8a08510ac 100644 --- a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -39,7 +39,8 @@ def to_global(prefix, mac, project_id): except TypeError: raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) except NameError: - raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id) + raise TypeError(_('Bad project_id for to_global_ipv6: %s') % + project_id) def to_mac(ipv6_address): diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 04c1b5598..e1ba4aafb 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase): def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, - bad_prefix, + bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') -- cgit From 63b26178407423524390b2a47425b6953c910e00 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Thu, 25 Aug 2011 18:00:32 -0400 Subject: Test fixup after last review feedback commit. --- nova/tests/api/openstack/test_servers.py | 11 ++++------- nova/tests/test_cloud.py | 2 +- nova/tests/test_xenapi.py | 2 -- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3905e4f7a..f75263c45 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -2488,11 +2488,6 @@ class TestServerStatus(test.TestCase): task_states.REBOOTING) self.assertEqual(response['server']['status'], 'REBOOT') - def test_hard_reboot(self): - response = self._get_with_state(vm_states.ACTIVE, - task_states.HARD_REBOOTING) - self.assertEqual(response['server']['status'], 'HARD_REBOOT') - def test_rebuild(self): response = self._get_with_state(vm_states.REBUILDING) self.assertEqual(response['server']['status'], 'REBUILD') @@ -2506,11 +2501,13 @@ class TestServerStatus(test.TestCase): self.assertEqual(response['server']['status'], 'RESIZE') def test_verify_resize(self): - response = self._get_with_state(vm_states.VERIFY_RESIZE) + response = self._get_with_state(vm_states.ACTIVE, + task_states.RESIZE_VERIFY) self.assertEqual(response['server']['status'], 'VERIFY_RESIZE') def test_password_update(self): - response = self._get_with_state(vm_states.ACTIVE, task_states.PASSWORD) + response = self._get_with_state(vm_states.ACTIVE, + task_states.UPDATING_PASSWORD) self.assertEqual(response['server']['status'], 'PASSWORD') def test_stopped(self): diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 9d58b7341..9deb5c011 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -1611,7 +1611,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(groupSet, expected_groupSet) self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'), {'instance_id': 'i-12345678', - 'instanceInitiatedShutdownBehavior': 'stop'}) + 'instanceInitiatedShutdownBehavior': 'stopped'}) self.assertEqual(get_attribute('instanceType'), {'instance_id': 'i-12345678', 'instanceType': 'fake_type'}) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 061e9ffea..2f0559366 100644 --- a/nova/tests/test_xenapi.py +++ 
b/nova/tests/test_xenapi.py @@ -24,8 +24,6 @@ import re import stubout import ast -eventlet.monkey_patch() - from nova import db from nova import context from nova import flags -- cgit From 4834b920e3186712ab56e65a88c2e8c838d16f9c Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 25 Aug 2011 18:38:35 -0700 Subject: VSA code redesign. Drive types completely replaced by Volume types --- bin/nova-manage | 206 ++++++++++++--------- bin/nova-vsa | 10 +- nova/api/openstack/contrib/drive_types.py | 143 -------------- .../openstack/contrib/virtual_storage_arrays.py | 54 +++--- nova/db/api.py | 45 ----- nova/db/sqlalchemy/api.py | 153 +-------------- .../migrate_repo/versions/042_add_vsa_data.py | 133 ------------- .../migrate_repo/versions/043_add_vsa_data.py | 75 ++++++++ nova/db/sqlalchemy/models.py | 43 +---- nova/db/sqlalchemy/session.py | 2 + nova/exception.py | 12 -- nova/log.py | 2 +- nova/network/linux_net.py | 1 - nova/quota.py | 5 +- nova/scheduler/vsa.py | 68 ++++--- .../api/openstack/contrib/test_drive_types.py | 192 ------------------- nova/tests/api/openstack/contrib/test_vsa.py | 79 ++++---- nova/tests/api/openstack/test_extensions.py | 3 +- nova/tests/scheduler/test_vsa_scheduler.py | 68 ++++--- nova/tests/test_drive_types.py | 146 --------------- nova/tests/test_vsa.py | 49 +++-- nova/tests/test_vsa_volumes.py | 77 ++++---- nova/tests/test_xenapi.py | 1 + nova/virt/libvirt/connection.py | 5 +- nova/volume/api.py | 63 +++---- nova/volume/driver.py | 74 ++++++-- nova/volume/manager.py | 27 ++- nova/volume/san.py | 12 +- nova/volume/volume_types.py | 43 ++++- nova/vsa/api.py | 148 ++++++--------- nova/vsa/drive_types.py | 114 ------------ nova/vsa/fake.py | 2 +- nova/vsa/manager.py | 52 +++--- nova/vsa/utils.py | 80 ++++++++ 34 files changed, 761 insertions(+), 1426 deletions(-) delete mode 100644 nova/api/openstack/contrib/drive_types.py delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py delete mode 100644 nova/tests/api/openstack/contrib/test_drive_types.py delete mode 100644 nova/tests/test_drive_types.py delete mode 100644 nova/vsa/drive_types.py create mode 100644 nova/vsa/utils.py diff --git a/bin/nova-manage b/bin/nova-manage index bd2d43139..977ad5c66 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -53,6 +53,7 @@ CLI interface for nova management. """ +import ast import gettext import glob import json @@ -64,8 +65,6 @@ import time from optparse import OptionParser -import ast - # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), @@ -87,15 +86,13 @@ from nova import quota from nova import rpc from nova import utils from nova import version +from nova import vsa from nova.api.ec2 import ec2utils from nova.auth import manager from nova.cloudpipe import pipelib from nova.compute import instance_types from nova.db import migration -from nova import compute -from nova import volume -from nova import vsa -from nova.vsa import drive_types +from nova.volume import volume_types FLAGS = flags.FLAGS flags.DECLARE('fixed_range', 'nova.network.manager') @@ -1076,14 +1073,12 @@ class VsaCommands(object): def __init__(self, *args, **kwargs): self.manager = manager.AuthManager() self.vsa_api = vsa.API() - self.compute_api = compute.API() - self.volume_api = volume.API() self.context = context.get_admin_context() self._format_str_vsa = "%-5s %-15s %-25s %-10s %-6s "\ "%-9s %-10s %-10s %10s" self._format_str_volume = "\t%-4s %-15s %-5s %-10s %-20s %s" - self._format_str_drive = "\t%-4s %-15s %-5s %-10s %-20s %s" + self._format_str_drive = "\t%-4s %-15s %-5s %-10s %-20s %-4s %-10s %s" self._format_str_instance = "\t%-4s %-10s %-20s %-12s %-10s "\ "%-15s %-15s %-10s %-15s %s" @@ -1124,7 +1119,7 @@ class VsaCommands(object): def _print_volume(self, vol): print self._format_str_volume %\ (vol['id'], - vol['display_name'], + vol['display_name'] or vol['name'], vol['size'], vol['status'], vol['attach_status'], @@ -1138,15 +1133,24 @@ class VsaCommands(object): _('size'), _('status'), _('host'), + _('type'), + _('typeName'), _('createTime')) def _print_drive(self, drive): - print self._format_str_volume %\ + if drive['volume_type_id'] is not None and drive.get('volume_type'): + drive_type_name = drive['volume_type'].get('name') + else: + drive_type_name = '' + + print self._format_str_drive %\ (drive['id'], drive['display_name'], drive['size'], drive['status'], drive['host'], + drive['volume_type_id'], + drive_type_name, str(drive['created_at'])) def _print_instance_header(self): @@ -1196,9 +1200,7 @@ class VsaCommands(object): vsa_id = vsa.get('id') if print_instances: - instances = self.compute_api.get_all(context, - search_opts={'metadata': - dict(vsa_id=str(vsa_id))}) + instances = self.vsa_api.get_all_vsa_instances(context, vsa_id) if instances: print self._print_instance_header() @@ -1207,8 +1209,7 @@ class VsaCommands(object): print if print_drives: - drives = self.volume_api.get_all_by_vsa(context, - vsa_id, "to") + drives = self.vsa_api.get_all_vsa_drives(context, vsa_id) if drives: self._print_drive_header() for drive in drives: @@ -1216,8 +1217,7 @@ class VsaCommands(object): print if print_volumes: - volumes = self.volume_api.get_all_by_vsa(context, - vsa_id, "from") + volumes = self.vsa_api.get_all_vsa_volumes(context, vsa_id) if volumes: self._print_volume_header() for volume in volumes: @@ -1344,7 +1344,7 @@ class VsaCommands(object): @args('--id', dest='vsa_id', metavar="", help='VSA ID (optional)') - @args('--all', dest='all', action="store_true", + @args('--all', dest='all', action="store_true", default=False, help='Show all available details') @args('--drives', dest='drives', action="store_true", help='Include drive-level details') @@ -1384,6 +1384,7 @@ class VsaDriveTypeCommands(object): def __init__(self, *args, **kwargs): super(VsaDriveTypeCommands, self).__init__(*args, **kwargs) self.context = context.get_admin_context() + self._drive_type_template = '%s_%sGB_%sRPM' def _list(self, drives): format_str = "%-5s %-30s %-10s %-10s %-10s %-20s 
%-10s %s" @@ -1398,75 +1399,94 @@ class VsaDriveTypeCommands(object): _('visible'), _('createTime')) - for drive in drives: + for name, vol_type in drives.iteritems(): + drive = vol_type.get('extra_specs') print format_str %\ - (str(drive['id']), - drive['name'], - drive['type'], - str(drive['size_gb']), - drive['rpm'], - drive['capabilities'], - str(drive['visible']), - str(drive['created_at'])) + (str(vol_type['id']), + drive['drive_name'], + drive['drive_type'], + drive['drive_size'], + drive['drive_rpm'], + drive.get('capabilities', ''), + str(drive.get('visible', '')), + str(vol_type['created_at'])) @args('--type', dest='type', metavar="", help='Drive type (SATA, SAS, SSD, etc.)') @args('--size', dest='size_gb', metavar="", help='Drive size in GB') @args('--rpm', dest='rpm', metavar="", help='RPM') - @args('--capabilities', dest='capabilities', metavar="", - help='Different capabilities') - @args('--visible', dest='visible', metavar="", + @args('--capabilities', dest='capabilities', default=None, + metavar="", help='Different capabilities') + @args('--hide', dest='hide', action="store_true", default=False, help='Show or hide drive') @args('--name', dest='name', metavar="", help='Drive name') - def create(self, type, size_gb, rpm, capabilities='', - visible=None, name=None): + def create(self, type, size_gb, rpm, capabilities=None, + hide=False, name=None): """Create drive type.""" - if visible in [None, "--show", "show"]: - visible = True - elif visible in ["--hide", "hide"]: - visible = False - else: - raise ValueError(_('Visible parameter should be set to --show '\ - 'or --hide')) + hide = True if hide in [True, "True", "--hide", "hide"] else False - result = drive_types.create(self.context, - type, int(size_gb), rpm, - capabilities, visible, name) - self._list([result]) + if name is None: + name = self._drive_type_template % (type, size_gb, rpm) - @args('--name', dest='name', metavar="", help='Drive name') - def delete(self, name): - """Delete drive type.""" + extra_specs = {'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': type, + 'drive_size': size_gb, + 'drive_rpm': rpm, + 'visible': True, + } + if hide: + extra_specs['visible'] = False - dtype = drive_types.get_by_name(self.context, name) - drive_types.delete(self.context, dtype['id']) + if capabilities is not None and capabilities != '': + extra_specs['capabilities'] = capabilities - @args('--name', dest='name', metavar="", help='Drive name') - @args('--new_name', dest='new_name', metavar="", - help='New Drive name (optional)') - def rename(self, name, new_name=None): - """Rename drive type.""" + volume_types.create(self.context, name, extra_specs) + result = volume_types.get_volume_type_by_name(self.context, name) + self._list({name: result}) - dtype = drive_types.rename(self.context, - name, new_name) - self._list([dtype]) + @args('--name', dest='name', metavar="", help='Drive name') + @args('--purge', action="store_true", dest='purge', default=False, + help='purge record from database') + def delete(self, name, purge): + """Marks instance types / flavors as deleted""" + try: + if purge: + volume_types.purge(self.context, name) + verb = "purged" + else: + volume_types.destroy(self.context, name) + verb = "deleted" + except exception.ApiError: + print "Valid volume type name is required" + sys.exit(1) + except exception.DBError, e: + print "DB Error: %s" % e + sys.exit(2) + except: + sys.exit(3) + else: + print "%s %s" % (name, verb) - @args('--all', dest='visible', action="store_false", - help='Show all drives') + 
@args('--all', dest='all', action="store_true", default=False, + help='Show all drives (including invisible)') @args('--name', dest='name', metavar="", help='Show only specified drive') - def list(self, visible=None, name=None): + def list(self, all=False, name=None): """Describe all available VSA drive types (or particular one).""" - visible = False if visible in ["--all", False] else True + all = False if all in ["--all", False, "False"] else True + search_opts = {'extra_specs': {'type': 'vsa_drive'}} if name is not None: - drive = drive_types.get_by_name(self.context, name) - drives = [drive] - else: - drives = drive_types.get_all(self.context, visible) + search_opts['extra_specs']['name'] = name + if all == False: + search_opts['extra_specs']['visible'] = '1' + + drives = volume_types.get_all_types(self.context, + search_opts=search_opts) self._list(drives) @args('--name', dest='name', metavar="", help='Drive name') @@ -1474,32 +1494,44 @@ class VsaDriveTypeCommands(object): help='Drive type (SATA, SAS, SSD, etc.)') @args('--size', dest='size_gb', metavar="", help='Drive size in GB') @args('--rpm', dest='rpm', metavar="", help='RPM') - @args('--capabilities', dest='capabilities', metavar="", - help='Different capabilities') - @args('--visible', dest='visible', metavar="", - help='Show or hide drive') + @args('--capabilities', dest='capabilities', default=None, + metavar="", help='Different capabilities') + @args('--visible', dest='visible', + metavar="", help='Show or hide drive') def update(self, name, type=None, size_gb=None, rpm=None, - capabilities='', visible=None): + capabilities=None, visible=None): """Update drive type.""" - values = { - 'type': type, - 'size_gb': size_gb, - 'rpm': rpm, - 'capabilities': capabilities, - } - if visible: - if visible in ["--show", "show"]: - values['visible'] = True - elif visible in ["--hide", "hide"]: - values['visible'] = False - else: - raise ValueError(_("Visible parameter should be set to "\ - "--show or --hide")) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + extra_specs = {'type': 'vsa_drive'} - dtype = drive_types.get_by_name(self.context, name) - dtype = drive_types.update(self.context, dtype['id'], **values) - self._list([dtype]) + if type: + extra_specs['drive_type'] = type + + if size_gb: + extra_specs['drive_size'] = size_gb + + if rpm: + extra_specs['drive_rpm'] = rpm + + if capabilities: + extra_specs['capabilities'] = capabilities + + if visible is not None: + if visible in ["show", True, "True"]: + extra_specs['visible'] = True + elif visible in ["hide", False, "False"]: + extra_specs['visible'] = False + else: + raise ValueError(_('visible parameter should be set to '\ + 'show or hide')) + + db.api.volume_type_extra_specs_update_or_create(self.context, + volume_type['id'], + extra_specs) + result = volume_types.get_volume_type_by_name(self.context, name) + self._list({name: result}) class VolumeCommands(object): diff --git a/bin/nova-vsa b/bin/nova-vsa index d765e8f9e..2d6eee2c0 100755 --- a/bin/nova-vsa +++ b/bin/nova-vsa @@ -4,6 +4,7 @@ # Copyright (c) 2011 Zadara Storage Inc. # Copyright (c) 2011 OpenStack LLC. # +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -17,6 +18,10 @@ # under the License. 
"""Starter script for Nova VSA.""" + +import eventlet +eventlet.monkey_patch() + import os import sys @@ -28,6 +33,7 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): sys.path.insert(0, possible_topdir) + from nova import flags from nova import log as logging from nova import service @@ -37,5 +43,7 @@ if __name__ == '__main__': utils.default_flagfile() flags.FLAGS(sys.argv) logging.setup() - service.serve() + utils.monkey_patch() + server = service.Service.create(binary='nova-vsa') + service.serve(server) service.wait() diff --git a/nova/api/openstack/contrib/drive_types.py b/nova/api/openstack/contrib/drive_types.py deleted file mode 100644 index 1aa65374f..000000000 --- a/nova/api/openstack/contrib/drive_types.py +++ /dev/null @@ -1,143 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" The Drive Types extension for Virtual Storage Arrays""" - -from webob import exc - -from nova.vsa import drive_types -from nova import exception -from nova import log as logging -from nova.api.openstack import common -from nova.api.openstack import extensions -from nova.api.openstack import faults - -LOG = logging.getLogger("nova.api.drive_types") - - -def _drive_type_view(drive): - """Maps keys for drive types view.""" - d = {} - - d['id'] = drive['id'] - d['displayName'] = drive['name'] - d['type'] = drive['type'] - d['size'] = drive['size_gb'] - d['rpm'] = drive['rpm'] - d['capabilities'] = drive['capabilities'] - return d - - -class DriveTypeController(object): - """The Drive Type API controller for the OpenStack API.""" - - _serialization_metadata = { - 'application/xml': { - "attributes": { - "drive_type": [ - "id", - "displayName", - "type", - "size", - "rpm", - "capabilities", - ]}}} - - def index(self, req): - """Returns a list of drive types.""" - - context = req.environ['nova.context'] - dtypes = drive_types.get_all(context) - limited_list = common.limited(dtypes, req) - res = [_drive_type_view(drive) for drive in limited_list] - return {'drive_types': res} - - def show(self, req, id): - """Return data about the given drive type.""" - context = req.environ['nova.context'] - - try: - drive = drive_types.get(context, id) - except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - - return {'drive_type': _drive_type_view(drive)} - - def create(self, req, body): - """Creates a new drive type.""" - context = req.environ['nova.context'] - - if not body: - return faults.Fault(exc.HTTPUnprocessableEntity()) - - drive = body['drive_type'] - - name = drive.get('displayName') - type = drive.get('type') - size = drive.get('size') - rpm = drive.get('rpm') - capabilities = drive.get('capabilities') - - LOG.audit(_("Create drive type %(name)s for "\ - "%(type)s:%(size)s:%(rpm)s"), locals(), context=context) - - new_drive = drive_types.create(context, - type=type, 
- size_gb=size, - rpm=rpm, - capabilities=capabilities, - name=name) - - return {'drive_type': _drive_type_view(new_drive)} - - def delete(self, req, id): - """Deletes a drive type.""" - context = req.environ['nova.context'] - - LOG.audit(_("Delete drive type with id: %s"), id, context=context) - - try: - drive_types.delete(context, id) - except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - - -class Drive_types(extensions.ExtensionDescriptor): - - def get_name(self): - return "DriveTypes" - - def get_alias(self): - return "zadr-drive_types" - - def get_description(self): - return "Drive Types support" - - def get_namespace(self): - return "http://docs.openstack.org/ext/drive_types/api/v1.1" - - def get_updated(self): - return "2011-06-29T00:00:00+00:00" - - def get_resources(self): - resources = [] - res = extensions.ResourceExtension( - 'zadr-drive_types', - DriveTypeController()) - - resources.append(res) - return resources diff --git a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index 81dbc9e1f..f3e4fc849 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -106,6 +106,10 @@ class VsaController(object): self.network_api = network.API() super(VsaController, self).__init__() + def _get_instances_by_vsa_id(self, context, id): + return self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(id))}) + def _items(self, req, details): """Return summary or detailed list of VSAs.""" context = req.environ['nova.context'] @@ -114,8 +118,7 @@ class VsaController(object): vsa_list = [] for vsa in limited_list: - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) vsa_list.append(_vsa_view(context, vsa, details, instances)) return {'vsaSet': vsa_list} @@ -136,9 +139,7 @@ class VsaController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) - + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) return {'vsa': _vsa_view(context, vsa, True, instances)} def create(self, req, body): @@ -171,9 +172,7 @@ class VsaController(object): vsa = self.vsa_api.create(context, **args) - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(vsa.get('id')))}) - + instances = self._get_instances_by_vsa_id(context, vsa.get('id')) return {'vsa': _vsa_view(context, vsa, True, instances)} def delete(self, req, id): @@ -202,14 +201,14 @@ class VsaController(object): locals(), context=context) try: - instances = self.compute_api.get_all(context, - search_opts={'metadata': dict(vsa_id=str(id))}) - - if instances is None or len(instances)==0: + instances = self._get_instances_by_vsa_id(context, id) + if instances is None or len(instances) == 0: return faults.Fault(exc.HTTPNotFound()) for instance in instances: - self.network_api.allocate_for_instance(context, instance, vpn=False) + self.network_api.allocate_for_instance(context, instance, + vpn=False) + # Placeholder return except exception.NotFound: @@ -228,6 +227,7 @@ class VsaController(object): LOG.audit(_("Disassociate address from VSA %(id)s"), locals(), context=context) + # Placeholder class VsaVolumeDriveController(volumes.VolumeController): @@ -255,6 +255,7 @@ class 
VsaVolumeDriveController(volumes.VolumeController): def __init__(self): self.volume_api = volume.API() + self.vsa_api = vsa.API() super(VsaVolumeDriveController, self).__init__() def _translation(self, context, vol, vsa_id, details): @@ -264,7 +265,7 @@ class VsaVolumeDriveController(volumes.VolumeController): translation = volumes.translate_volume_summary_view d = translation(context, vol) - d['vsaId'] = vol[self.direction] + d['vsaId'] = vsa_id d['name'] = vol['name'] return d @@ -276,8 +277,9 @@ class VsaVolumeDriveController(volumes.VolumeController): LOG.error(_("%(obj)s with ID %(id)s not found"), locals()) raise - own_vsa_id = volume_ref[self.direction] - if own_vsa_id != int(vsa_id): + own_vsa_id = self.volume_api.get_volume_metadata_value(volume_ref, + self.direction) + if own_vsa_id != vsa_id: LOG.error(_("%(obj)s with ID %(id)s belongs to VSA %(own_vsa_id)s"\ " and not to VSA %(vsa_id)s."), locals()) raise exception.Invalid() @@ -286,8 +288,8 @@ class VsaVolumeDriveController(volumes.VolumeController): """Return summary or detailed list of volumes for particular VSA.""" context = req.environ['nova.context'] - vols = self.volume_api.get_all_by_vsa(context, vsa_id, - self.direction.split('_')[0]) + vols = self.volume_api.get_all(context, + search_opts={'metadata': {self.direction: str(vsa_id)}}) limited_list = common.limited(vols, req) res = [self._translation(context, vol, vsa_id, details) \ @@ -317,11 +319,19 @@ class VsaVolumeDriveController(volumes.VolumeController): size = vol['size'] LOG.audit(_("Create volume of %(size)s GB from VSA ID %(vsa_id)s"), locals(), context=context) + try: + # create is supported for volumes only (drives created through VSA) + volume_type = self.vsa_api.get_vsa_volume_type(context) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) - new_volume = self.volume_api.create(context, size, None, - vol.get('displayName'), - vol.get('displayDescription'), - from_vsa_id=vsa_id) + new_volume = self.volume_api.create(context, + size, + None, + vol.get('displayName'), + vol.get('displayDescription'), + volume_type=volume_type, + metadata=dict(from_vsa_id=str(vsa_id))) return {self.object: self._translation(context, new_volume, vsa_id, True)} diff --git a/nova/db/api.py b/nova/db/api.py index 354a90571..a2e581fe9 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -918,16 +918,6 @@ def volume_get_all_by_project(context, project_id): return IMPL.volume_get_all_by_project(context, project_id) -def volume_get_all_assigned_to_vsa(context, vsa_id): - """Get all volumes assigned to particular VSA.""" - return IMPL.volume_get_all_assigned_to_vsa(context, vsa_id) - - -def volume_get_all_assigned_from_vsa(context, vsa_id): - """Get all volumes created from particular VSA.""" - return IMPL.volume_get_all_assigned_from_vsa(context, vsa_id) - - def volume_get_by_ec2_id(context, ec2_id): """Get a volume by ec2 id.""" return IMPL.volume_get_by_ec2_id(context, ec2_id) @@ -1528,36 +1518,6 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, #################### -def drive_type_create(context, values): - """Creates drive type record.""" - return IMPL.drive_type_create(context, values) - - -def drive_type_update(context, drive_type_id, values): - """Updates drive type record.""" - return IMPL.drive_type_update(context, drive_type_id, values) - - -def drive_type_destroy(context, drive_type_id): - """Deletes drive type record.""" - return IMPL.drive_type_destroy(context, drive_type_id) - - -def drive_type_get(context, drive_type_id): - 
"""Get drive type record by id.""" - return IMPL.drive_type_get(context, drive_type_id) - - -def drive_type_get_by_name(context, name): - """Get drive type record by name.""" - return IMPL.drive_type_get_by_name(context, name) - - -def drive_type_get_all(context, visible): - """Returns all (or only visible) drive types.""" - return IMPL.drive_type_get_all(context, visible) - - def vsa_create(context, values): """Creates Virtual Storage Array record.""" return IMPL.vsa_create(context, values) @@ -1586,8 +1546,3 @@ def vsa_get_all(context): def vsa_get_all_by_project(context, project_id): """Get all Virtual Storage Array records by project ID.""" return IMPL.vsa_get_all_by_project(context, project_id) - - -def vsa_get_vc_ips_list(context, vsa_id): - """Retrieves IPs of instances associated with Virtual Storage Array.""" - return IMPL.vsa_get_vc_ips_list(context, vsa_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 7a572f55a..65b09a65d 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -2226,7 +2226,6 @@ def volume_get(context, volume_id, session=None): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(id=volume_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -2235,7 +2234,6 @@ def volume_get(context, volume_id, session=None): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=volume_id).\ filter_by(deleted=False).\ @@ -2253,7 +2251,6 @@ def volume_get_all(context): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2265,7 +2262,6 @@ def volume_get_all_by_host(context, host): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -2277,7 +2273,6 @@ def volume_get_all_by_instance(context, instance_id): result = session.query(models.Volume).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(instance_id=instance_id).\ filter_by(deleted=False).\ all() @@ -2286,28 +2281,6 @@ def volume_get_all_by_instance(context, instance_id): return result -@require_admin_context -def volume_get_all_assigned_to_vsa(context, vsa_id): - session = get_session() - result = session.query(models.Volume).\ - options(joinedload('drive_type')).\ - filter_by(to_vsa_id=vsa_id).\ - filter_by(deleted=False).\ - all() - return result - - -@require_admin_context -def volume_get_all_assigned_from_vsa(context, vsa_id): - session = get_session() - result = session.query(models.Volume).\ - options(joinedload('drive_type')).\ - filter_by(from_vsa_id=vsa_id).\ - filter_by(deleted=False).\ - all() - return result - - @require_context def volume_get_all_by_project(context, project_id): authorize_project_context(context, project_id) @@ -2317,7 +2290,6 @@ def volume_get_all_by_project(context, project_id): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ filter_by(project_id=project_id).\ 
filter_by(deleted=can_read_deleted(context)).\ all() @@ -2332,7 +2304,6 @@ def volume_get_instance(context, volume_id): options(joinedload('instance')).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')).\ - options(joinedload('drive_type')).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) @@ -2377,7 +2348,7 @@ def volume_update(context, volume_id, values): volume_ref = volume_get(context, volume_id, session=session) volume_ref.update(values) volume_ref.save(session=session) - return volume_ref + #################### @@ -3871,106 +3842,6 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, #################### -@require_admin_context -def drive_type_create(context, values): - """ - Creates drive type record. - """ - try: - drive_type_ref = models.DriveTypes() - drive_type_ref.update(values) - drive_type_ref.save() - except Exception, e: - raise exception.DBError(e) - return drive_type_ref - - -@require_admin_context -def drive_type_update(context, drive_type_id, values): - """ - Updates drive type record. - """ - session = get_session() - with session.begin(): - drive_type_ref = drive_type_get(context, drive_type_id, - session=session) - drive_type_ref.update(values) - drive_type_ref.save(session=session) - return drive_type_ref - - -@require_admin_context -def drive_type_destroy(context, drive_type_id): - """ - Deletes drive type record. - """ - session = get_session() - drive_type_ref = session.query(models.DriveTypes).\ - filter_by(id=drive_type_id) - records = drive_type_ref.delete() - if records == 0: - raise exception.VirtualDiskTypeNotFound(id=drive_type_id) - - -@require_context -def drive_type_get(context, drive_type_id, session=None): - """ - Get drive type record by id. - """ - if not session: - session = get_session() - - result = session.query(models.DriveTypes).\ - filter_by(id=drive_type_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - if not result: - raise exception.VirtualDiskTypeNotFound(id=drive_type_id) - - return result - - -@require_context -def drive_type_get_by_name(context, name, session=None): - """ - Get drive type record by name. - """ - if not session: - session = get_session() - - result = session.query(models.DriveTypes).\ - filter_by(name=name).\ - filter_by(deleted=can_read_deleted(context)).\ - first() - if not result: - raise exception.VirtualDiskTypeNotFoundByName(name=name) - - return result - - -@require_context -def drive_type_get_all(context, visible): - """ - Returns all (or only visible) drive types. - """ - session = get_session() - if visible: - drive_types = session.query(models.DriveTypes).\ - filter_by(deleted=can_read_deleted(context)).\ - filter_by(visible=True).\ - order_by("name").\ - all() - else: - drive_types = session.query(models.DriveTypes).\ - filter_by(deleted=can_read_deleted(context)).\ - order_by("name").\ - all() - return drive_types - - - #################### - - @require_admin_context def vsa_create(context, values): """ @@ -4067,26 +3938,4 @@ def vsa_get_all_by_project(context, project_id): all() -@require_context -def vsa_get_vc_ips_list(context, vsa_id): - """ - Retrieves IPs of instances associated with Virtual Storage Array. 
- """ - result = [] - - vc_instances = instance_get_all_by_filters(context, - search_opts={'metadata': dict(vsa_id=str(vsa_id))}) - for vc_instance in vc_instances: - if vc_instance['fixed_ips']: - for fixed in vc_instance['fixed_ips']: - # insert the [floating,fixed] (if exists) in the head, - # otherwise append the [none,fixed] in the tail - ip = {} - ip['fixed'] = fixed['address'] - if fixed['floating_ips']: - ip['floating'] = fixed['floating_ips'][0]['address'] - result.append(ip) - - return result - #################### diff --git a/nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py deleted file mode 100644 index 8a57bd234..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/042_add_vsa_data.py +++ /dev/null @@ -1,133 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table -from sqlalchemy import Text, Boolean, ForeignKey - -from nova import log as logging - -meta = MetaData() - -# Just for the ForeignKey and column creation to succeed, these are not the -# actual definitions of tables . 
-# - -volumes = Table('volumes', meta, - Column('id', Integer(), primary_key=True, nullable=False), - ) - -to_vsa_id = Column('to_vsa_id', Integer(), nullable=True) -from_vsa_id = Column('from_vsa_id', Integer(), nullable=True) -drive_type_id = Column('drive_type_id', Integer(), nullable=True) - - -# New Tables -# - -virtual_storage_arrays = Table('virtual_storage_arrays', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('display_name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('display_description', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('project_id', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('availability_zone', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('instance_type_id', Integer(), nullable=False), - Column('image_ref', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('vc_count', Integer(), nullable=False), - Column('vol_count', Integer(), nullable=False), - Column('status', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - ) - -drive_types = Table('drive_types', meta, - Column('created_at', DateTime(timezone=False)), - Column('updated_at', DateTime(timezone=False)), - Column('deleted_at', DateTime(timezone=False)), - Column('deleted', Boolean(create_constraint=True, name=None)), - Column('id', Integer(), primary_key=True, nullable=False), - Column('name', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False), - unique=True), - Column('type', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('size_gb', Integer(), nullable=False), - Column('rpm', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('capabilities', - String(length=255, convert_unicode=False, assert_unicode=None, - unicode_error=None, _warn_on_bytestring=False)), - Column('visible', Boolean(create_constraint=True, name=None)), - ) - -new_tables = (virtual_storage_arrays, drive_types) - -# -# Tables to alter -# - - -def upgrade(migrate_engine): - - from nova import context - from nova import db - from nova import flags - - FLAGS = flags.FLAGS - - # Upgrade operations go here. 
Don't create your own engine; - # bind migrate_engine to your metadata - meta.bind = migrate_engine - - for table in new_tables: - try: - table.create() - except Exception: - logging.info(repr(table)) - logging.exception('Exception while creating table') - raise - - volumes.create_column(to_vsa_id) - volumes.create_column(from_vsa_id) - volumes.create_column(drive_type_id) - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - volumes.drop_column(to_vsa_id) - volumes.drop_column(from_vsa_id) - volumes.drop_column(drive_type_id) - - for table in new_tables: - table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py new file mode 100644 index 000000000..844643704 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_add_vsa_data.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, DateTime, Integer, MetaData, String, Table +from sqlalchemy import Text, Boolean, ForeignKey + +from nova import log as logging + +meta = MetaData() + +# +# New Tables +# + +virtual_storage_arrays = Table('virtual_storage_arrays', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type_id', Integer(), nullable=False), + Column('image_ref', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vc_count', Integer(), nullable=False), + Column('vol_count', Integer(), nullable=False), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+
+    try:
+        virtual_storage_arrays.create()
+    except Exception:
+        logging.info(repr(virtual_storage_arrays))
+        logging.exception('Exception while creating table')
+        raise
+
+
+def downgrade(migrate_engine):
+    meta.bind = migrate_engine
+
+    virtual_storage_arrays.drop()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 65464ece5..f8feb0b4f 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -352,13 +352,6 @@ class Volume(BASE, NovaBase):
 
     volume_type_id = Column(Integer)
 
-    to_vsa_id = Column(Integer,
-                       ForeignKey('virtual_storage_arrays.id'), nullable=True)
-    from_vsa_id = Column(Integer,
-                         ForeignKey('virtual_storage_arrays.id'), nullable=True)
-    drive_type_id = Column(Integer,
-                           ForeignKey('drive_types.id'), nullable=True)
-
 
 class VolumeMetadata(BASE, NovaBase):
     """Represents a metadata key/value pair for a volume"""
@@ -402,38 +395,6 @@ class VolumeTypeExtraSpecs(BASE, NovaBase):
                          'VolumeTypeExtraSpecs.deleted == False)')
 
 
-class DriveTypes(BASE, NovaBase):
-    """Represents the known drive types (storage media)."""
-    __tablename__ = 'drive_types'
-
-    id = Column(Integer, primary_key=True, autoincrement=True)
-
-    """
-    @property
-    def name(self):
-        if self.capabilities:
-            return FLAGS.drive_type_template_long % \
-                (self.type, str(self.size_gb), self.rpm, self.capabilities)
-        else:
-            return FLAGS.drive_type_template_short % \
-                (self.type, str(self.size_gb), self.rpm)
-    """
-
-    name = Column(String(255), unique=True)
-    type = Column(String(255))
-    size_gb = Column(Integer)
-    rpm = Column(String(255))
-    capabilities = Column(String(255))
-
-    visible = Column(Boolean, default=True)
-
-    volumes = relationship(Volume,
-                           backref=backref('drive_type', uselist=False),
-                           foreign_keys=id,
-                           primaryjoin='and_(Volume.drive_type_id == '
-                                       'DriveTypes.id)')
-
-
 class Quota(BASE, NovaBase):
     """Represents a single quota override for a project.
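For context, the register_models() hunk below adds VirtualStorageArray to the model list, so a fresh (non-migrated) database picks up the new table as well. A minimal sketch of that path, with an in-memory SQLite URL standing in for FLAGS.sql_connection (illustrative only):

    from sqlalchemy import create_engine
    from nova.db.sqlalchemy import models

    # register_models() does this for every model in its tuple; all models
    # share one MetaData, so create_all() emits any missing tables.
    engine = create_engine('sqlite://')  # stand-in for FLAGS.sql_connection
    models.VirtualStorageArray.metadata.create_all(engine)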
@@ -918,7 +879,9 @@ def register_models(): Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project, Certificate, ConsolePool, Console, Zone, - AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration) + VolumeMetadata, VolumeTypes, VolumeTypeExtraSpecs, + AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration, + VirtualStorageArray) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py index 7b717115c..643e2338e 100644 --- a/nova/db/sqlalchemy/session.py +++ b/nova/db/sqlalchemy/session.py @@ -30,9 +30,11 @@ import nova.exception import nova.flags import nova.log + FLAGS = nova.flags.FLAGS LOG = nova.log.getLogger("nova.db.sqlalchemy") + try: import MySQLdb except ImportError: diff --git a/nova/exception.py b/nova/exception.py index f75d0b832..32981f4d5 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -365,10 +365,6 @@ class VolumeTypeExtraSpecsNotFound(NotFound): "key %(extra_specs_key)s.") -class VolumeNotFoundForVsa(VolumeNotFound): - message = _("Volume not found for vsa %(vsa_id)s.") - - class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") @@ -799,14 +795,6 @@ class VirtualStorageArrayNotFoundByName(NotFound): message = _("Virtual Storage Array %(name)s could not be found.") -class VirtualDiskTypeNotFound(NotFound): - message = _("Drive Type %(id)d could not be found.") - - -class VirtualDiskTypeNotFoundByName(NotFound): - message = _("Drive Type %(name)s could not be found.") - - class CannotResizeToSameSize(NovaException): message = _("When resizing, instances must change size!") diff --git a/nova/log.py b/nova/log.py index 3b86d78e8..eb0b6020f 100644 --- a/nova/log.py +++ b/nova/log.py @@ -32,6 +32,7 @@ import json import logging import logging.handlers import os +import stat import sys import traceback @@ -258,7 +259,6 @@ class NovaRootLogger(NovaLogger): self.addHandler(self.filelog) self.logpath = logpath - import stat st = os.stat(self.logpath) if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode): os.chmod(self.logpath, FLAGS.logfile_mode) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 3de605ae2..57c1d0c28 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -508,7 +508,6 @@ def get_dhcp_hosts(context, network_ref): if network_ref['multi_host'] and FLAGS.host != host: continue hosts.append(_host_dhcp(fixed_ref)) - return '\n'.join(hosts) diff --git a/nova/quota.py b/nova/quota.py index 48e598659..771477747 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -116,8 +116,9 @@ def allowed_volumes(context, requested_volumes, size): allowed_gigabytes = _get_request_allotment(requested_gigabytes, used_gigabytes, quota['gigabytes']) - allowed_volumes = min(allowed_volumes, - int(allowed_gigabytes // size)) + if size != 0: + allowed_volumes = min(allowed_volumes, + int(allowed_gigabytes // size)) return min(requested_volumes, allowed_volumes) diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index 218ad5c7b..ad5ebc2dc 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -20,15 +20,15 @@ VSA Simple Scheduler """ from nova import context -from nova import rpc from nova import db from nova import flags +from nova import log as logging +from nova import rpc from nova import utils -from nova.vsa.api import VsaState -from nova.volume import api as volume_api from nova.scheduler 
import driver from nova.scheduler import simple -from nova import log as logging +from nova.vsa.api import VsaState +from nova.volume import volume_types LOG = logging.getLogger('nova.scheduler.vsa') @@ -67,21 +67,21 @@ class VsaScheduler(simple.SimpleScheduler): def _compare_names(str1, str2): return str1.lower() == str2.lower() - def _compare_sizes_approxim(cap_capacity, size_gb): + def _compare_sizes_approxim(cap_capacity, size): cap_capacity = BYTES_TO_GB(int(cap_capacity)) - size_gb = int(size_gb) - size_perc = size_gb * \ + size = int(size) + size_perc = size * \ FLAGS.drive_type_approx_capacity_percent / 100 - return cap_capacity >= size_gb - size_perc and \ - cap_capacity <= size_gb + size_perc + return cap_capacity >= size - size_perc and \ + cap_capacity <= size + size_perc # Add more entries for additional comparisons compare_list = [{'cap1': 'DriveType', 'cap2': 'type', 'cmp_func': _compare_names}, {'cap1': 'DriveCapacity', - 'cap2': 'size_gb', + 'cap2': 'size', 'cmp_func': _compare_sizes_approxim}] for cap in compare_list: @@ -193,8 +193,8 @@ class VsaScheduler(simple.SimpleScheduler): 'attach_status': "detached", 'display_name': vol['name'], 'display_description': vol['description'], - 'to_vsa_id': vsa_id, - 'drive_type_id': vol['drive_ref']['id'], + 'volume_type_id': vol['volume_type_id'], + 'metadata': dict(to_vsa_id=vsa_id), 'host': vol['host'], 'scheduled_at': now } @@ -228,7 +228,8 @@ class VsaScheduler(simple.SimpleScheduler): def _assign_hosts_to_volumes(self, context, volume_params, forced_host): - prev_drive_type_id = None + prev_volume_type_id = None + request_spec = {} selected_hosts = [] LOG.debug(_("volume_params %(volume_params)s") % locals()) @@ -244,14 +245,25 @@ class VsaScheduler(simple.SimpleScheduler): vol['capabilities'] = None continue - drive_type = vol['drive_ref'] - request_spec = {'size': vol['size'], - 'drive_type': dict(drive_type)} + volume_type_id = vol['volume_type_id'] + request_spec['size'] = vol['size'] - if prev_drive_type_id != drive_type['id']: + if prev_volume_type_id is None or\ + prev_volume_type_id != volume_type_id: # generate list of hosts for this drive type + + volume_type = volume_types.get_volume_type(context, + volume_type_id) + drive_type = { + 'name': volume_type['extra_specs'].get('drive_name'), + 'type': volume_type['extra_specs'].get('drive_type'), + 'size': int(volume_type['extra_specs'].get('drive_size')), + 'rpm': volume_type['extra_specs'].get('drive_rpm'), + } + request_spec['drive_type'] = drive_type + all_hosts = self._filter_hosts("volume", request_spec) - prev_drive_type_id = drive_type['id'] + prev_volume_type_id = volume_type_id (host, qos_cap) = self._select_hosts(request_spec, all_hosts, selected_hosts) @@ -279,8 +291,7 @@ class VsaScheduler(simple.SimpleScheduler): self._provision_volume(context, vol, vsa_id, availability_zone) except: if vsa_id: - db.vsa_update(context, vsa_id, - dict(status=VsaState.FAILED)) + db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED)) for vol in volume_params: if 'capabilities' in vol: @@ -302,12 +313,23 @@ class VsaScheduler(simple.SimpleScheduler): 'scheduled_at': now}) return host - drive_type = volume_ref['drive_type'] - if drive_type is None: + volume_type_id = volume_ref['volume_type_id'] + if volume_type_id: + volume_type = volume_types.get_volume_type(context, volume_type_id) + + if volume_type_id is None or\ + volume_types.is_vsa_volume(volume_type_id, volume_type): + LOG.debug(_("Non-VSA volume %d"), volume_ref['id']) return super(VsaScheduler, 
self).schedule_create_volume(context, volume_id, *_args, **_kwargs) - drive_type = dict(drive_type) + + drive_type = { + 'name': volume_type['extra_specs'].get('drive_name'), + 'type': volume_type['extra_specs'].get('drive_type'), + 'size': int(volume_type['extra_specs'].get('drive_size')), + 'rpm': volume_type['extra_specs'].get('drive_rpm'), + } LOG.debug(_("Spawning volume %(volume_id)s with drive type "\ "%(drive_type)s"), locals()) diff --git a/nova/tests/api/openstack/contrib/test_drive_types.py b/nova/tests/api/openstack/contrib/test_drive_types.py deleted file mode 100644 index 2f7d327d3..000000000 --- a/nova/tests/api/openstack/contrib/test_drive_types.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import stubout -import webob - -#from nova import compute -from nova.vsa import drive_types -from nova import exception -from nova import context -from nova import test -from nova import log as logging -from nova.tests.api.openstack import fakes - -from nova.api.openstack.contrib.drive_types import _drive_type_view - -LOG = logging.getLogger('nova.tests.api.openstack.drive_types') - -last_param = {} - - -def _get_default_drive_type(): - param = { - 'name': 'Test drive type', - 'type': 'SATA', - 'size_gb': 123, - 'rpm': '7200', - 'capabilities': '', - 'visible': True - } - return param - - -def _create(context, **param): - global last_param - LOG.debug(_("_create: %s"), param) - param['id'] = 123 - last_param = param - return param - - -def _delete(context, id): - global last_param - last_param = dict(id=id) - - LOG.debug(_("_delete: %s"), locals()) - - -def _get(context, id): - global last_param - last_param = dict(id=id) - - LOG.debug(_("_get: %s"), locals()) - if id != '123': - raise exception.NotFound - - dtype = _get_default_drive_type() - dtype['id'] = id - return dtype - - -def _get_all(context, visible=True): - LOG.debug(_("_get_all: %s"), locals()) - dtype = _get_default_drive_type() - dtype['id'] = 123 - return [dtype] - - -class DriveTypesApiTest(test.TestCase): - def setUp(self): - super(DriveTypesApiTest, self).setUp() - self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.reset_fake_data() - fakes.FakeAuthDatabase.data = {} - fakes.stub_out_networking(self.stubs) - fakes.stub_out_rate_limiting(self.stubs) - fakes.stub_out_auth(self.stubs) - self.stubs.Set(drive_types, "create", _create) - self.stubs.Set(drive_types, "delete", _delete) - self.stubs.Set(drive_types, "get", _get) - self.stubs.Set(drive_types, "get_all", _get_all) - - self.context = context.get_admin_context() - - def tearDown(self): - self.stubs.UnsetAll() - super(DriveTypesApiTest, self).tearDown() - - def test_drive_types_api_create(self): - global last_param - last_param = {} - - dtype = _get_default_drive_type() - dtype['id'] = 123 - - body = dict(drive_type=_drive_type_view(dtype)) - req = webob.Request.blank('/v1.1/zadr-drive_types') - req.method = 'POST' - req.body = json.dumps(body) - 
req.headers['content-type'] = 'application/json' - - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - - # Compare if parameters were correctly passed to stub - for k, v in last_param.iteritems(): - self.assertEqual(last_param[k], dtype[k]) - - resp_dict = json.loads(resp.body) - - # Compare response - self.assertTrue('drive_type' in resp_dict) - resp_dtype = resp_dict['drive_type'] - self.assertEqual(resp_dtype, _drive_type_view(dtype)) - - def test_drive_types_api_delete(self): - global last_param - last_param = {} - - dtype_id = 123 - req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) - req.method = 'DELETE' - - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - self.assertEqual(str(last_param['id']), str(dtype_id)) - - def test_drive_types_show(self): - global last_param - last_param = {} - - dtype_id = 123 - req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) - req.method = 'GET' - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - self.assertEqual(str(last_param['id']), str(dtype_id)) - - resp_dict = json.loads(resp.body) - - # Compare response - self.assertTrue('drive_type' in resp_dict) - resp_dtype = resp_dict['drive_type'] - exp_dtype = _get_default_drive_type() - exp_dtype['id'] = dtype_id - exp_dtype_view = _drive_type_view(exp_dtype) - for k, v in exp_dtype_view.iteritems(): - self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) - - def test_drive_types_show_invalid_id(self): - global last_param - last_param = {} - - dtype_id = 234 - req = webob.Request.blank('/v1.1/zadr-drive_types/%d' % dtype_id) - req.method = 'GET' - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 404) - self.assertEqual(str(last_param['id']), str(dtype_id)) - - def test_drive_types_index(self): - - req = webob.Request.blank('/v1.1/zadr-drive_types') - req.method = 'GET' - resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) - - resp_dict = json.loads(resp.body) - - self.assertTrue('drive_types' in resp_dict) - resp_dtypes = resp_dict['drive_types'] - self.assertEqual(len(resp_dtypes), 1) - - resp_dtype = resp_dtypes.pop() - exp_dtype = _get_default_drive_type() - exp_dtype['id'] = 123 - exp_dtype_view = _drive_type_view(exp_dtype) - for k, v in exp_dtype_view.iteritems(): - self.assertEqual(str(resp_dtype[k]), str(exp_dtype_view[k])) diff --git a/nova/tests/api/openstack/contrib/test_vsa.py b/nova/tests/api/openstack/contrib/test_vsa.py index a9b76b0ff..311b6cb8d 100644 --- a/nova/tests/api/openstack/contrib/test_vsa.py +++ b/nova/tests/api/openstack/contrib/test_vsa.py @@ -18,15 +18,14 @@ import stubout import unittest import webob - +from nova import context +from nova import db from nova import exception from nova import flags -from nova import vsa -from nova import db -from nova import volume -from nova import context -from nova import test from nova import log as logging +from nova import test +from nova import volume +from nova import vsa from nova.api import openstack from nova.tests.api.openstack import fakes import nova.wsgi @@ -120,7 +119,7 @@ class VSAApiTest(test.TestCase): vsa = {"displayName": "VSA Test Name", "displayDescription": "VSA Test Desc"} body = dict(vsa=vsa) - req = webob.Request.blank('/v1.1/zadr-vsa') + req = webob.Request.blank('/v1.1/777/zadr-vsa') req.method = 'POST' req.body = json.dumps(body) req.headers['content-type'] = 'application/json' @@ -139,7 +138,7 @@ class 
VSAApiTest(test.TestCase): vsa['displayDescription']) def test_vsa_create_no_body(self): - req = webob.Request.blank('/v1.1/zadr-vsa') + req = webob.Request.blank('/v1.1/777/zadr-vsa') req.method = 'POST' req.body = json.dumps({}) req.headers['content-type'] = 'application/json' @@ -152,7 +151,7 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 123 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -164,7 +163,7 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 234 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -176,7 +175,7 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 123 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) @@ -191,14 +190,14 @@ class VSAApiTest(test.TestCase): last_param = {} vsa_id = 234 - req = webob.Request.blank('/v1.1/zadr-vsa/%d' % vsa_id) + req = webob.Request.blank('/v1.1/777/zadr-vsa/%d' % vsa_id) req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 404) self.assertEqual(str(last_param['vsa_id']), str(vsa_id)) def test_vsa_index(self): - req = webob.Request.blank('/v1.1/zadr-vsa') + req = webob.Request.blank('/v1.1/777/zadr-vsa') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) @@ -213,7 +212,7 @@ class VSAApiTest(test.TestCase): self.assertEqual(resp_vsa['id'], 123) def test_vsa_detail(self): - req = webob.Request.blank('/v1.1/zadr-vsa/detail') + req = webob.Request.blank('/v1.1/777/zadr-vsa/detail') req.method = 'GET' resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) @@ -239,17 +238,21 @@ def _get_default_volume_param(): 'name': 'vol name', 'display_name': 'Default vol name', 'display_description': 'Default vol description', - 'from_vsa_id': None, - 'to_vsa_id': None, + 'volume_type_id': 1, + 'volume_metadata': [], } +def stub_get_vsa_volume_type(self, context): + return {'id': 1, + 'name': 'VSA volume type', + 'extra_specs': {'type': 'vsa_volume'}} + + def stub_volume_create(self, context, size, snapshot_id, name, description, **param): LOG.debug(_("_create: param=%s"), size) vol = _get_default_volume_param() - for k, v in param.iteritems(): - vol[k] = v vol['size'] = size vol['display_name'] = name vol['display_description'] = description @@ -270,10 +273,10 @@ def stub_volume_get(self, context, volume_id): LOG.debug(_("_volume_get: volume_id=%s"), volume_id) vol = _get_default_volume_param() vol['id'] = volume_id - if volume_id == '234': - vol['from_vsa_id'] = 123 + meta = {'key': 'from_vsa_id', 'value': '123'} if volume_id == '345': - vol['to_vsa_id'] = 123 + meta = {'key': 'to_vsa_id', 'value': '123'} + vol['volume_metadata'].append(meta) return vol @@ -281,9 +284,9 @@ def stub_volume_get_notfound(self, context, volume_id): raise exception.NotFound -def stub_volume_get_all_by_vsa(self, context, vsa_id, direction): +def stub_volume_get_all(self, context, search_opts): vol = stub_volume_get(self, context, '123') - vol['%s_vsa_id' % direction] = vsa_id + vol['metadata'] = search_opts['metadata'] return [vol] @@ -302,13 +305,13 @@ class 
VSAVolumeApiTest(test.TestCase): fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_auth(self.stubs) self.stubs.Set(nova.db.api, 'vsa_get', return_vsa) + self.stubs.Set(vsa.api.API, "get_vsa_volume_type", + stub_get_vsa_volume_type) - self.stubs.Set(volume.api.API, "create", stub_volume_create) self.stubs.Set(volume.api.API, "update", stub_volume_update) self.stubs.Set(volume.api.API, "delete", stub_volume_delete) - self.stubs.Set(volume.api.API, "get_all_by_vsa", - stub_volume_get_all_by_vsa) self.stubs.Set(volume.api.API, "get", stub_volume_get) + self.stubs.Set(volume.api.API, "get_all", stub_volume_get_all) self.context = context.get_admin_context() self.test_obj = test_obj if test_obj else "volume" @@ -319,11 +322,13 @@ class VSAVolumeApiTest(test.TestCase): super(VSAVolumeApiTest, self).tearDown() def test_vsa_volume_create(self): + self.stubs.Set(volume.api.API, "create", stub_volume_create) + vol = {"size": 100, "displayName": "VSA Volume Test Name", "displayDescription": "VSA Volume Test Desc"} body = {self.test_obj: vol} - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) req.method = 'POST' req.body = json.dumps(body) req.headers['content-type'] = 'application/json' @@ -344,7 +349,7 @@ class VSAVolumeApiTest(test.TestCase): self.assertEqual(resp.status_int, 400) def test_vsa_volume_create_no_body(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) req.method = 'POST' req.body = json.dumps({}) req.headers['content-type'] = 'application/json' @@ -356,25 +361,25 @@ class VSAVolumeApiTest(test.TestCase): self.assertEqual(resp.status_int, 400) def test_vsa_volume_index(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s' % self.test_objs) + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s' % self.test_objs) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) def test_vsa_volume_detail(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/detail' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/detail' % \ self.test_objs) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) def test_vsa_volume_show(self): obj_num = 234 if self.test_objs == "volumes" else 345 - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ (self.test_objs, obj_num)) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) def test_vsa_volume_show_no_vsa_assignment(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ (self.test_objs)) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 400) @@ -382,7 +387,7 @@ class VSAVolumeApiTest(test.TestCase): def test_vsa_volume_show_no_volume(self): self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ (self.test_objs)) resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 404) @@ -392,7 +397,7 @@ class VSAVolumeApiTest(test.TestCase): update = {"status": "available", "displayName": "Test Display name"} body = {self.test_obj: update} - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ 
(self.test_objs, obj_num)) req.method = 'PUT' req.body = json.dumps(body) @@ -406,7 +411,7 @@ class VSAVolumeApiTest(test.TestCase): def test_vsa_volume_delete(self): obj_num = 234 if self.test_objs == "volumes" else 345 - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/%s' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/%s' % \ (self.test_objs, obj_num)) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -416,7 +421,7 @@ class VSAVolumeApiTest(test.TestCase): self.assertEqual(resp.status_int, 400) def test_vsa_volume_delete_no_vsa_assignment(self): - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/4/%s/333' % \ (self.test_objs)) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) @@ -425,7 +430,7 @@ class VSAVolumeApiTest(test.TestCase): def test_vsa_volume_delete_no_volume(self): self.stubs.Set(volume.api.API, "get", stub_volume_get_notfound) - req = webob.Request.blank('/v1.1/zadr-vsa/123/%s/333' % \ + req = webob.Request.blank('/v1.1/777/zadr-vsa/123/%s/333' % \ (self.test_objs)) req.method = 'DELETE' resp = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 6e9cae38d..05267d8fb 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -85,7 +85,6 @@ class ExtensionControllerTest(test.TestCase): ext_path = os.path.join(os.path.dirname(__file__), "extensions") self.flags(osapi_extensions_path=ext_path) self.ext_list = [ - "DriveTypes", "Createserverext", "FlavorExtraSpecs", "Floating_ips", @@ -96,8 +95,8 @@ class ExtensionControllerTest(test.TestCase): "Quotas", "Rescue", "SecurityGroups", - "VirtualInterfaces", "VSAs", + "VirtualInterfaces", "Volumes", "VolumeTypes", ] diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py index 697ad3842..309db96a2 100644 --- a/nova/tests/scheduler/test_vsa_scheduler.py +++ b/nova/tests/scheduler/test_vsa_scheduler.py @@ -16,13 +16,15 @@ import stubout import nova + +from nova import context +from nova import db from nova import exception from nova import flags -from nova import db -from nova import context +from nova import log as logging from nova import test from nova import utils -from nova import log as logging +from nova.volume import volume_types from nova.scheduler import vsa as vsa_sched from nova.scheduler import driver @@ -52,15 +54,26 @@ class VsaSchedulerTestCase(test.TestCase): def _get_vol_creation_request(self, num_vols, drive_ix, size=0): volume_params = [] for i in range(num_vols): - drive_type = {'id': i, - 'name': 'name_' + str(drive_ix), - 'type': 'type_' + str(drive_ix), - 'size_gb': 1 + 100 * (drive_ix)} + + name = 'name_' + str(i) + try: + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + except exception.ApiError: + # type is already created + pass + + volume_type = volume_types.get_volume_type_by_name(self.context, + name) volume = {'size': size, 'snapshot_id': None, 'name': 'vol_' + str(i), 'description': None, - 'drive_ref': drive_type} + 'volume_type_id': volume_type['id']} volume_params.append(volume) return {'num_volumes': len(volume_params), @@ -217,7 +230,12 @@ class VsaSchedulerTestCase(test.TestCase): self.stubs.Set(nova.db, 'volume_get', self._fake_volume_get) 
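         # NOTE: these Set() calls swap the nova.db volume accessors for the
         # in-memory fakes defined on this test case, so the scheduler tests
         # run without touching a real database.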
self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update) + self.created_types_lst = [] + def tearDown(self): + for name in self.created_types_lst: + volume_types.purge(self.context, name) + self.stubs.UnsetAll() super(VsaSchedulerTestCase, self).tearDown() @@ -463,7 +481,7 @@ class VsaSchedulerTestCase(test.TestCase): global global_volume global_volume = {} - global_volume['drive_type'] = None + global_volume['volume_type_id'] = None self.assertRaises(driver.NoValidHost, self.sched.schedule_create_volume, @@ -485,12 +503,16 @@ class VsaSchedulerTestCase(test.TestCase): global_volume = {} drive_ix = 2 - drive_type = {'id': drive_ix, - 'name': 'name_' + str(drive_ix), - 'type': 'type_' + str(drive_ix), - 'size_gb': 1 + 100 * (drive_ix)} - - global_volume['drive_type'] = drive_type + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] global_volume['size'] = 0 host = self.sched.schedule_create_volume(self.context, @@ -525,12 +547,16 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase): global_volume = {} drive_ix = 2 - drive_type = {'id': drive_ix, - 'name': 'name_' + str(drive_ix), - 'type': 'type_' + str(drive_ix), - 'size_gb': 1 + 100 * (drive_ix)} - - global_volume['drive_type'] = drive_type + name = 'name_' + str(drive_ix) + volume_types.create(self.context, name, + extra_specs={'type': 'vsa_drive', + 'drive_name': name, + 'drive_type': 'type_' + str(drive_ix), + 'drive_size': 1 + 100 * (drive_ix)}) + self.created_types_lst.append(name) + volume_type = volume_types.get_volume_type_by_name(self.context, name) + + global_volume['volume_type_id'] = volume_type['id'] global_volume['size'] = 0 host = self.sched.schedule_create_volume(self.context, diff --git a/nova/tests/test_drive_types.py b/nova/tests/test_drive_types.py deleted file mode 100644 index b52e6705b..000000000 --- a/nova/tests/test_drive_types.py +++ /dev/null @@ -1,146 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Unit Tests for drive types codecode -""" -import time - -from nova import context -from nova import flags -from nova import log as logging -from nova import test -from nova.vsa import drive_types - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.test_drive_types') - - -class DriveTypesTestCase(test.TestCase): - """Test cases for driver types code""" - def setUp(self): - super(DriveTypesTestCase, self).setUp() - self.ctxt = context.RequestContext(None, None) - self.ctxt_admin = context.get_admin_context() - self._dtype = self._create_default_drive_type() - - def tearDown(self): - self._dtype = None - - def _create_default_drive_type(self): - """Create a volume object.""" - dtype = {} - dtype['type'] = 'SATA' - dtype['size_gb'] = 150 - dtype['rpm'] = 5000 - dtype['capabilities'] = None - dtype['visible'] = True - - LOG.debug(_("Default values for Drive Type: %s"), dtype) - return dtype - - def test_drive_type_create_delete(self): - dtype = self._dtype - prev_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - - new = drive_types.create(self.ctxt_admin, **dtype) - for k, v in dtype.iteritems(): - self.assertEqual(v, new[k], 'one of fields doesnt match') - - new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - self.assertNotEqual(len(prev_all_dtypes), - len(new_all_dtypes), - 'drive type was not created') - - drive_types.delete(self.ctxt_admin, new['id']) - new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - self.assertEqual(prev_all_dtypes, - new_all_dtypes, - 'drive types was not deleted') - - def test_drive_type_check_name_generation(self): - dtype = self._dtype - new = drive_types.create(self.ctxt_admin, **dtype) - expected_name = FLAGS.drive_type_template_short % \ - (dtype['type'], dtype['size_gb'], dtype['rpm']) - self.assertEqual(new['name'], expected_name, - 'name was not generated correctly') - - dtype['capabilities'] = 'SEC' - new2 = drive_types.create(self.ctxt_admin, **dtype) - expected_name = FLAGS.drive_type_template_long % \ - (dtype['type'], dtype['size_gb'], dtype['rpm'], - dtype['capabilities']) - self.assertEqual(new2['name'], expected_name, - 'name was not generated correctly') - - drive_types.delete(self.ctxt_admin, new['id']) - drive_types.delete(self.ctxt_admin, new2['id']) - - def test_drive_type_create_delete_invisible(self): - dtype = self._dtype - dtype['visible'] = False - prev_all_dtypes = drive_types.get_all(self.ctxt_admin, True) - new = drive_types.create(self.ctxt_admin, **dtype) - - new_all_dtypes = drive_types.get_all(self.ctxt_admin, True) - self.assertEqual(prev_all_dtypes, new_all_dtypes) - - new_all_dtypes = drive_types.get_all(self.ctxt_admin, False) - self.assertNotEqual(prev_all_dtypes, new_all_dtypes) - - drive_types.delete(self.ctxt_admin, new['id']) - - def test_drive_type_rename_update(self): - dtype = self._dtype - dtype['capabilities'] = None - - new = drive_types.create(self.ctxt_admin, **dtype) - for k, v in dtype.iteritems(): - self.assertEqual(v, new[k], 'one of fields doesnt match') - - new_name = 'NEW_DRIVE_NAME' - new = drive_types.rename(self.ctxt_admin, new['name'], new_name) - self.assertEqual(new['name'], new_name) - - new = drive_types.rename(self.ctxt_admin, new_name) - expected_name = FLAGS.drive_type_template_short % \ - (dtype['type'], dtype['size_gb'], dtype['rpm']) - self.assertEqual(new['name'], expected_name) - - changes = {'rpm': 7200} - new = drive_types.update(self.ctxt_admin, new['id'], **changes) - for k, v in changes.iteritems(): - self.assertEqual(v, new[k], 'one of fields 
doesnt match') - - drive_types.delete(self.ctxt_admin, new['id']) - - def test_drive_type_get(self): - dtype = self._dtype - new = drive_types.create(self.ctxt_admin, **dtype) - - new2 = drive_types.get(self.ctxt_admin, new['id']) - for k, v in new2.iteritems(): - self.assertEqual(str(new[k]), str(new2[k]), - 'one of fields doesnt match') - - new2 = drive_types.get_by_name(self.ctxt_admin, new['name']) - for k, v in new.iteritems(): - self.assertEqual(str(new[k]), str(new2[k]), - 'one of fields doesnt match') - - drive_types.delete(self.ctxt_admin, new['id']) diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index 726939744..300a4d71c 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -13,38 +13,29 @@ # License for the specific language governing permissions and limitations # under the License. -import stubout import base64 +import stubout from xml.etree import ElementTree from xml.etree.ElementTree import Element, SubElement +from nova import context +from nova import db from nova import exception from nova import flags +from nova import log as logging +from nova import test from nova import vsa from nova import volume -from nova import db -from nova import context -from nova import test -from nova import log as logging +from nova.volume import volume_types +from nova.vsa import utils as vsa_utils + import nova.image.fake FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.vsa') -def fake_drive_type_get_by_name(context, name): - drive_type = { - 'id': 1, - 'name': name, - 'type': name.split('_')[0], - 'size_gb': int(name.split('_')[1]), - 'rpm': name.split('_')[2], - 'capabilities': '', - 'visible': True} - return drive_type - - class VsaTestCase(test.TestCase): def setUp(self): @@ -53,9 +44,20 @@ class VsaTestCase(test.TestCase): self.vsa_api = vsa.API() self.volume_api = volume.API() + FLAGS.quota_volumes = 100 + FLAGS.quota_gigabytes = 10000 + self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() + volume_types.create(self.context, + 'SATA_500_7200', + extra_specs={'type': 'vsa_drive', + 'drive_name': 'SATA_500_7200', + 'drive_type': 'SATA', + 'drive_size': '500', + 'drive_rpm': '7200'}) + def fake_show_by_name(meh, context, name): if name == 'wrong_image_name': LOG.debug(_("Test: Emulate wrong VSA name. 
Raise")) @@ -124,9 +126,6 @@ class VsaTestCase(test.TestCase): FLAGS.vsa_multi_vol_creation = multi_vol_creation - self.stubs.Set(nova.vsa.drive_types, 'get_by_name', - fake_drive_type_get_by_name) - param = {'storage': [{'drive_name': 'SATA_500_7200', 'num_drives': 3}]} vsa_ref = self.vsa_api.create(self.context, **param) @@ -157,8 +156,6 @@ class VsaTestCase(test.TestCase): self.vsa_api.delete(self.context, vsa_ref['id']) def test_vsa_generate_user_data(self): - self.stubs.Set(nova.vsa.drive_types, 'get_by_name', - fake_drive_type_get_by_name) FLAGS.vsa_multi_vol_creation = False param = {'display_name': 'VSA name test', @@ -167,12 +164,10 @@ class VsaTestCase(test.TestCase): 'storage': [{'drive_name': 'SATA_500_7200', 'num_drives': 3}]} vsa_ref = self.vsa_api.create(self.context, **param) - volumes = db.volume_get_all_assigned_to_vsa(self.context, - vsa_ref['id']) + volumes = self.vsa_api.get_all_vsa_drives(self.context, + vsa_ref['id']) - user_data = self.vsa_api.generate_user_data(self.context, - vsa_ref, - volumes) + user_data = vsa_utils.generate_user_data(vsa_ref, volumes) user_data = base64.b64decode(user_data) LOG.debug(_("Test: user_data = %s"), user_data) diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index d451a4377..43173d86a 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -29,15 +29,6 @@ FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.vsa.volumes') -def _default_volume_param(): - return { - 'size': 1, - 'snapshot_id': None, - 'name': 'Test volume name', - 'description': 'Test volume desc name' - } - - class VsaVolumesTestCase(test.TestCase): def setUp(self): @@ -49,6 +40,8 @@ class VsaVolumesTestCase(test.TestCase): self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() + self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context) + def fake_show_by_name(meh, context, name): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} @@ -66,12 +59,23 @@ class VsaVolumesTestCase(test.TestCase): self.stubs.UnsetAll() super(VsaVolumesTestCase, self).tearDown() + def _default_volume_param(self): + return { + 'size': 1, + 'snapshot_id': None, + 'name': 'Test volume name', + 'description': 'Test volume desc name', + 'volume_type': self.default_vol_type, + 'metadata': {'from_vsa_id': self.vsa_id} + } + + def _get_all_volumes_by_vsa(self): + return self.volume_api.get_all(self.context, + search_opts={'metadata': {"from_vsa_id": str(self.vsa_id)}}) + def test_vsa_volume_create_delete(self): """ Check if volume properly created and deleted. 
""" - vols1 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") - volume_param = _default_volume_param() - volume_param['from_vsa_id'] = self.vsa_id + volume_param = self._default_volume_param() volume_ref = self.volume_api.create(self.context, **volume_param) self.assertEqual(volume_ref['display_name'], @@ -81,21 +85,34 @@ class VsaVolumesTestCase(test.TestCase): self.assertEqual(volume_ref['size'], volume_param['size']) self.assertEqual(volume_ref['status'], - 'available') + 'creating') - vols2 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") - self.assertEqual(len(vols1) + 1, len(vols2)) + vols2 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols2[0] + self.assertEqual(volume_ref['display_name'], + volume_param['name']) + self.assertEqual(volume_ref['display_description'], + volume_param['description']) + self.assertEqual(volume_ref['size'], + volume_param['size']) + self.assertEqual(volume_ref['status'], + 'creating') + + self.volume_api.update(self.context, + volume_ref['id'], {'status': 'available'}) self.volume_api.delete(self.context, volume_ref['id']) - vols3 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") - self.assertEqual(len(vols3) + 1, len(vols2)) + + vols3 = self._get_all_volumes_by_vsa() + self.assertEqual(1, len(vols2)) + volume_ref = vols3[0] + self.assertEqual(volume_ref['status'], + 'deleting') def test_vsa_volume_delete_nonavail_volume(self): """ Check volume deleton in different states. """ - volume_param = _default_volume_param() - volume_param['from_vsa_id'] = self.vsa_id + volume_param = self._default_volume_param() volume_ref = self.volume_api.create(self.context, **volume_param) self.volume_api.update(self.context, @@ -104,26 +121,18 @@ class VsaVolumesTestCase(test.TestCase): self.volume_api.delete, self.context, volume_ref['id']) - self.volume_api.update(self.context, - volume_ref['id'], {'status': 'error'}) - self.volume_api.delete(self.context, volume_ref['id']) - def test_vsa_volume_delete_vsa_with_volumes(self): """ Check volume deleton in different states. 
""" - vols1 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") + vols1 = self._get_all_volumes_by_vsa() for i in range(3): - volume_param = _default_volume_param() - volume_param['from_vsa_id'] = self.vsa_id + volume_param = self._default_volume_param() volume_ref = self.volume_api.create(self.context, **volume_param) - vols2 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") + vols2 = self._get_all_volumes_by_vsa() self.assertEqual(len(vols1) + 3, len(vols2)) self.vsa_api.delete(self.context, self.vsa_id) - vols3 = self.volume_api.get_all_by_vsa(self.context, - self.vsa_id, "from") + vols3 = self._get_all_volumes_by_vsa() self.assertEqual(len(vols1), len(vols3)) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..6d1958401 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -203,6 +203,7 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) + @test.skip_test("Skip this test meanwhile") def test_parallel_builds(self): stubs.stubout_loopingcall_delay(self.stubs) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index abbef69bd..363a20ed0 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -135,8 +135,6 @@ flags.DEFINE_string('default_local_format', None, 'The default format a local_volume will be formatted with ' 'on creation.') - - flags.DEFINE_bool('libvirt_use_virtio_for_bridges', False, 'Use virtio for bridge interfaces') @@ -1088,7 +1086,8 @@ class LibvirtConnection(driver.ComputeDriver): 'ebs_root': ebs_root, 'local_device': local_device, 'volumes': block_device_mapping, - 'use_virtio_for_bridges': FLAGS.libvirt_use_virtio_for_bridges, + 'use_virtio_for_bridges': + FLAGS.libvirt_use_virtio_for_bridges, 'ephemerals': ephemerals} root_device_name = driver.block_device_info_get_root(block_device_info) diff --git a/nova/volume/api.py b/nova/volume/api.py index e66792373..d9c082514 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -42,9 +42,7 @@ class API(base.Base): """API for interacting with the volume manager.""" def create(self, context, size, snapshot_id, name, description, - volume_type=None, metadata=None, - to_vsa_id=None, from_vsa_id=None, drive_type_id=None, - availability_zone=None): + volume_type=None, metadata=None, availability_zone=None): if snapshot_id != None: snapshot = self.get_snapshot(context, snapshot_id) if snapshot['status'] != "available": @@ -53,13 +51,12 @@ class API(base.Base): if not size: size = snapshot['volume_size'] - if to_vsa_id is None: - if quota.allowed_volumes(context, 1, size) < 1: - pid = context.project_id - LOG.warn(_("Quota exceeded for %(pid)s, tried to create" - " %(size)sG volume") % locals()) - raise quota.QuotaError(_("Volume quota exceeded. You cannot " - "create a volume of size %sG") % size) + if quota.allowed_volumes(context, 1, size) < 1: + pid = context.project_id + LOG.warn(_("Quota exceeded for %(pid)s, tried to create" + " %(size)sG volume") % locals()) + raise quota.QuotaError(_("Volume quota exceeded. 
You cannot " + "create a volume of size %sG") % size) if availability_zone is None: availability_zone = FLAGS.storage_availability_zone @@ -81,19 +78,9 @@ class API(base.Base): 'display_description': description, 'volume_type_id': volume_type_id, 'metadata': metadata, - 'to_vsa_id': to_vsa_id, - 'from_vsa_id': from_vsa_id, - 'drive_type_id': drive_type_id, } volume = self.db.volume_create(context, options) - if from_vsa_id is not None: # for FE VSA volumes do nothing - now = utils.utcnow() - volume = self.db.volume_update(context, - volume['id'], {'status': 'available', - 'launched_at': now}) - return volume - rpc.cast(context, FLAGS.scheduler_topic, {"method": "create_volume", @@ -112,15 +99,6 @@ class API(base.Base): def delete(self, context, volume_id): volume = self.get(context, volume_id) - - if volume['from_vsa_id'] is not None: - if volume['status'] == "in-use": - raise exception.ApiError(_("Volume is in use. "\ - "Detach it first")) - self.db.volume_destroy(context, volume['id']) - LOG.debug(_("volume %d: deleted successfully"), volume['id']) - return - if volume['status'] != "available": raise exception.ApiError(_("Volume status must be available")) now = utils.utcnow() @@ -154,7 +132,7 @@ class API(base.Base): for i in volume.get('volume_metadata'): volume_metadata[i['key']] = i['value'] - for k, v in searchdict: + for k, v in searchdict.iteritems(): if k not in volume_metadata.keys()\ or volume_metadata[k] != v: return False @@ -163,6 +141,7 @@ class API(base.Base): # search_option to filter_name mapping. filter_mapping = {'metadata': _check_metadata_match} + result = [] for volume in volumes: # go over all filters in the list for opt, values in search_opts.iteritems(): @@ -172,21 +151,12 @@ class API(base.Base): # no such filter - ignore it, go to next filter continue else: - if filter_func(volume, values) == False: - # if one of conditions didn't match - remove - volumes.remove(volume) + if filter_func(volume, values): + result.append(volume) break + volumes = result return volumes - def get_all_by_vsa(self, context, vsa_id, direction): - if direction == "to": - return self.db.volume_get_all_assigned_to_vsa(context, vsa_id) - elif direction == "from": - return self.db.volume_get_all_assigned_from_vsa(context, vsa_id) - else: - raise exception.ApiError(_("Unsupported vol assignment type %s"), - direction) - def get_snapshot(self, context, snapshot_id): rv = self.db.snapshot_get(context, snapshot_id) return dict(rv.iteritems()) @@ -286,3 +256,12 @@ class API(base.Base): self.db.volume_metadata_update(context, volume_id, _metadata, True) return _metadata + + def get_volume_metadata_value(self, volume, key): + """Get value of particular metadata key.""" + metadata = volume.get('volume_metadata') + if metadata: + for i in volume['volume_metadata']: + if i['key'] == key: + return i['value'] + return None diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 9e046d054..2e9a394c7 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -28,6 +28,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils +from nova.volume import volume_types LOG = logging.getLogger("nova.volume.driver") @@ -516,7 +517,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) if not iscsi_properties['target_discovered']: - self._run_iscsiadm(iscsi_properties, '--op=new') + self._run_iscsiadm(iscsi_properties, ('--op', 'new')) if iscsi_properties.get('auth_method'): 
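             # When an auth method is configured, _iscsiadm_update below
             # copies the configured credentials onto the node record before
             # the session is used.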
self._iscsiadm_update(iscsi_properties, @@ -568,7 +569,7 @@ class ISCSIDriver(VolumeDriver): iscsi_properties = self._get_iscsi_properties(volume) self._iscsiadm_update(iscsi_properties, "node.startup", "manual") self._run_iscsiadm(iscsi_properties, "--logout") - self._run_iscsiadm(iscsi_properties, '--op=delete') + self._run_iscsiadm(iscsi_properties, ('--op', 'delete')) def check_for_export(self, context, volume_id): """Make sure volume is exported.""" @@ -813,9 +814,15 @@ class LoggingVolumeDriver(VolumeDriver): class ZadaraBEDriver(ISCSIDriver): """Performs actions to configure Zadara BE module.""" - def _not_vsa_be_volume(self, volume): + def _is_vsa_volume(self, volume): + return volume_types.is_vsa_volume(volume['volume_type_id']) + + def _is_vsa_drive(self, volume): + return volume_types.is_vsa_drive(volume['volume_type_id']) + + def _not_vsa_volume_or_drive(self, volume): """Returns True if volume is not VSA BE volume.""" - if volume['to_vsa_id'] is None: + if not volume_types.is_vsa_object(volume['volume_type_id']): LOG.debug(_("\tVolume %s is NOT VSA volume"), volume['name']) return True else: @@ -828,9 +835,14 @@ class ZadaraBEDriver(ISCSIDriver): """ Volume Driver methods """ def create_volume(self, volume): """Creates BE volume.""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).create_volume(volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s creation - do nothing"), + volume['name']) + return + if int(volume['size']) == 0: sizestr = '0' # indicates full-partition else: @@ -838,9 +850,16 @@ class ZadaraBEDriver(ISCSIDriver): # Set the qos-str to default type sas qosstr = 'SAS_1000' - drive_type = volume.get('drive_type') - if drive_type is not None: - qosstr = drive_type['type'] + ("_%s" % drive_type['size_gb']) + LOG.debug(_("\tvolume_type_id=%s"), volume['volume_type_id']) + + volume_type = volume_types.get_volume_type(None, + volume['volume_type_id']) + + LOG.debug(_("\tvolume_type=%s"), volume_type) + + if volume_type is not None: + qosstr = volume_type['extra_specs']['drive_type'] + \ + ("_%s" % volume_type['extra_specs']['drive_size']) try: self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', @@ -858,9 +877,14 @@ class ZadaraBEDriver(ISCSIDriver): def delete_volume(self, volume): """Deletes BE volume.""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).delete_volume(volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s deletion - do nothing"), + volume['name']) + return + try: self._sync_exec('/var/lib/zadara/bin/zadara_sncfg', 'delete_partition', @@ -874,16 +898,26 @@ class ZadaraBEDriver(ISCSIDriver): LOG.debug(_("VSA BE delete_volume for %s suceeded"), volume['name']) def local_path(self, volume): - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).local_path(volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s local path call - call discover"), + volume['name']) + return super(ZadaraBEDriver, self).discover_volume(None, volume) + raise exception.Error(_("local_path not supported")) def ensure_export(self, context, volume): """ensure BE export for a volume""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).ensure_export(context, volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s ensure export - do nothing"), 
+ volume['name']) + return + try: iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) @@ -900,9 +934,14 @@ class ZadaraBEDriver(ISCSIDriver): def create_export(self, context, volume): """create BE export for a volume""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).create_export(context, volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s create export - do nothing"), + volume['name']) + return + self._ensure_iscsi_targets(context, volume['host']) iscsi_target = self.db.volume_allocate_iscsi_target(context, volume['id'], @@ -915,9 +954,14 @@ class ZadaraBEDriver(ISCSIDriver): def remove_export(self, context, volume): """Removes BE export for a volume.""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).remove_export(context, volume) + if self._is_vsa_volume(volume): + LOG.debug(_("\tFE VSA Volume %s remove export - do nothing"), + volume['name']) + return + try: iscsi_target = self.db.volume_get_iscsi_target_num(context, volume['id']) @@ -939,14 +983,14 @@ class ZadaraBEDriver(ISCSIDriver): def create_snapshot(self, snapshot): """Nothing required for snapshot""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).create_snapshot(volume) pass def delete_snapshot(self, snapshot): """Nothing required to delete a snapshot""" - if self._not_vsa_be_volume(volume): + if self._not_vsa_volume_or_drive(volume): return super(ZadaraBEDriver, self).delete_snapshot(volume) pass diff --git a/nova/volume/manager.py b/nova/volume/manager.py index b23bff1fc..63656d485 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -45,11 +45,12 @@ intact. 
from nova import context from nova import exception -from nova import rpc from nova import flags from nova import log as logging from nova import manager +from nova import rpc from nova import utils +from nova.volume import volume_types LOG = logging.getLogger('nova.volume.manager') @@ -144,13 +145,23 @@ class VolumeManager(manager.SchedulerDependentManager): return volume_id def _notify_vsa(self, context, volume_ref, status): - if volume_ref['to_vsa_id'] is not None: - rpc.cast(context, - FLAGS.vsa_topic, - {"method": "vsa_volume_created", - "args": {"vol_id": volume_ref['id'], - "vsa_id": volume_ref['to_vsa_id'], - "status": status}}) + if volume_ref['volume_type_id'] is None: + return + + if volume_types.is_vsa_drive(volume_ref['volume_type_id']): + vsa_id = None + for i in volume_ref.get('volume_metadata'): + if i['key'] == 'to_vsa_id': + vsa_id = int(i['value']) + break + + if vsa_id: + rpc.cast(context, + FLAGS.vsa_topic, + {"method": "vsa_volume_created", + "args": {"vol_id": volume_ref['id'], + "vsa_id": vsa_id, + "status": status}}) def delete_volume(self, context, volume_id): """Deletes and unexports volume.""" diff --git a/nova/volume/san.py b/nova/volume/san.py index bdebb7783..9532c8116 100644 --- a/nova/volume/san.py +++ b/nova/volume/san.py @@ -64,14 +64,12 @@ class SanISCSIDriver(ISCSIDriver): # discover_volume is still OK # undiscover_volume is still OK - def _connect_to_ssh(self, san_ip=None): - if san_ip is None: - san_ip = FLAGS.san_ip + def _connect_to_ssh(self): ssh = paramiko.SSHClient() #TODO(justinsb): We need a better SSH key policy ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) if FLAGS.san_password: - ssh.connect(san_ip, + ssh.connect(FLAGS.san_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, password=FLAGS.san_password) @@ -79,7 +77,7 @@ class SanISCSIDriver(ISCSIDriver): privatekeyfile = os.path.expanduser(FLAGS.san_privatekey) # It sucks that paramiko doesn't support DSA keys privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile) - ssh.connect(san_ip, + ssh.connect(FLAGS.san_ip, port=FLAGS.san_ssh_port, username=FLAGS.san_login, pkey=privatekey) @@ -87,9 +85,9 @@ class SanISCSIDriver(ISCSIDriver): raise exception.Error(_("Specify san_password or san_privatekey")) return ssh - def _run_ssh(self, command, check_exit_code=True, san_ip=None): + def _run_ssh(self, command, check_exit_code=True): #TODO(justinsb): SSH connection caching (?) 
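+        # NOTE: each call opens a fresh SSH session via _connect_to_ssh(),
+        # which always targets FLAGS.san_ip.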
- ssh = self._connect_to_ssh(san_ip) + ssh = self._connect_to_ssh() #TODO(justinsb): Reintroduce the retry hack ret = ssh_execute(ssh, command, check_exit_code=check_exit_code) diff --git a/nova/volume/volume_types.py b/nova/volume/volume_types.py index 9b02d4ccc..ffa9e6e02 100644 --- a/nova/volume/volume_types.py +++ b/nova/volume/volume_types.py @@ -100,20 +100,22 @@ def get_all_types(context, inactive=0, search_opts={}): continue else: if filter_func(type_args, values): - # if one of conditions didn't match - remove result[type_name] = type_args break vol_types = result return vol_types -def get_volume_type(context, id): +def get_volume_type(ctxt, id): """Retrieves single volume type by id.""" if id is None: raise exception.InvalidVolumeType(volume_type=id) + if ctxt is None: + ctxt = context.get_admin_context() + try: - return db.volume_type_get(context, id) + return db.volume_type_get(ctxt, id) except exception.DBError: raise exception.ApiError(_("Unknown volume type: %s") % id) @@ -127,3 +129,38 @@ def get_volume_type_by_name(context, name): return db.volume_type_get_by_name(context, name) except exception.DBError: raise exception.ApiError(_("Unknown volume type: %s") % name) + + +def is_key_value_present(volume_type_id, key, value, volume_type=None): + if volume_type_id is None: + return False + + if volume_type is None: + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + if volume_type.get('extra_specs') is None or\ + volume_type['extra_specs'].get(key) != value: + return False + else: + return True + + +def is_vsa_drive(volume_type_id, volume_type=None): + return is_key_value_present(volume_type_id, + 'type', 'vsa_drive', volume_type) + + +def is_vsa_volume(volume_type_id, volume_type=None): + return is_key_value_present(volume_type_id, + 'type', 'vsa_volume', volume_type) + + +def is_vsa_object(volume_type_id): + if volume_type_id is None: + return False + + volume_type = get_volume_type(context.get_admin_context(), + volume_type_id) + + return is_vsa_drive(volume_type_id, volume_type) or\ + is_vsa_volume(volume_type_id, volume_type) diff --git a/nova/vsa/api.py b/nova/vsa/api.py index bb6e93b87..b279255d7 100644 --- a/nova/vsa/api.py +++ b/nova/vsa/api.py @@ -20,22 +20,26 @@ Handles all requests relating to Virtual Storage Arrays (VSAs). 
""" import sys -import base64 - -from xml.etree import ElementTree +from nova import compute from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import quota from nova import rpc -from nova.db import base - -from nova import compute from nova import volume from nova.compute import instance_types -from nova.vsa import drive_types +from nova.db import base +from nova.volume import volume_types + + +class VsaState: + CREATING = 'creating' # VSA creating (not ready yet) + LAUNCHING = 'launching' # Launching VCs (all BE volumes were created) + CREATED = 'created' # VSA fully created and ready for use + PARTIAL = 'partial' # Some BE drives were allocated + FAILED = 'failed' # Some BE storage allocations failed + DELETING = 'deleting' # VSA started the deletion procedure FLAGS = flags.FLAGS @@ -43,22 +47,14 @@ flags.DEFINE_string('vsa_ec2_access_key', None, 'EC2 access key used by VSA for accessing nova') flags.DEFINE_string('vsa_ec2_user_id', None, 'User ID used by VSA for accessing nova') - flags.DEFINE_boolean('vsa_multi_vol_creation', True, 'Ask scheduler to create multiple volumes in one call') +flags.DEFINE_string('vsa_volume_type_name', 'VSA volume type', + 'Name of volume type associated with FE VSA volumes') LOG = logging.getLogger('nova.vsa') -class VsaState: - CREATING = 'creating' # VSA creating (not ready yet) - LAUNCHING = 'launching' # Launching VCs (all BE volumes were created) - CREATED = 'created' # VSA fully created and ready for use - PARTIAL = 'partial' # Some BE storage allocations failed - FAILED = 'failed' # Some BE storage allocations failed - DELETING = 'deleting' # VSA started the deletion procedure - - class API(base.Base): """API for interacting with the VSA manager.""" @@ -67,6 +63,15 @@ class API(base.Base): self.volume_api = volume_api or volume.API() super(API, self).__init__(**kwargs) + def _check_volume_type_correctness(self, vol_type): + if vol_type.get('extra_specs') == None or\ + vol_type['extra_specs'].get('type') != 'vsa_drive' or\ + vol_type['extra_specs'].get('drive_type') == None or\ + vol_type['extra_specs'].get('drive_size') == None: + + raise exception.ApiError(_("Invalid drive type %s") + % vol_type['name']) + def _get_default_vsa_instance_type(self): return instance_types.get_instance_type_by_name( FLAGS.default_vsa_instance_type) @@ -89,16 +94,17 @@ class API(base.Base): if name is None: raise exception.ApiError(_("No drive_name param found in %s") % node) - - # find DB record for this disk try: - drive_ref = drive_types.get_by_name(context, name) + vol_type = volume_types.get_volume_type_by_name(context, name) except exception.NotFound: raise exception.ApiError(_("Invalid drive type name %s") % name) + self._check_volume_type_correctness(vol_type) + # if size field present - override disk size specified in DB - size = node.get('size', drive_ref['size_gb']) + size = int(node.get('size', + vol_type['extra_specs'].get('drive_size'))) if shared: part_size = FLAGS.vsa_part_size_gb @@ -110,17 +116,15 @@ class API(base.Base): size = 0 # special handling for full drives for i in range(num_volumes): - # volume_name = vsa_name + ("_%s_vol-%d" % (name, i)) volume_name = "drive-%03d" % first_index first_index += 1 volume_desc = 'BE volume for VSA %s type %s' % \ (vsa_name, name) volume = { 'size': size, - 'snapshot_id': None, 'name': volume_name, 'description': volume_desc, - 'drive_ref': drive_ref + 'volume_type_id': vol_type['id'], } volume_params.append(volume) @@ -211,7 +215,7 @@ class 
API(base.Base): if len(volume_params) > 0: request_spec = { 'num_volumes': len(volume_params), - 'vsa_id': vsa_id, + 'vsa_id': str(vsa_id), 'volumes': volume_params, } @@ -227,17 +231,21 @@ class API(base.Base): try: vol_name = vol['name'] vol_size = vol['size'] + vol_type_id = vol['volume_type_id'] LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "\ - "volume %(vol_name)s, %(vol_size)d GB"), - locals()) + "volume %(vol_name)s, %(vol_size)d GB, "\ + "type %(vol_type_id)s"), locals()) + + vol_type = volume_types.get_volume_type(context, + vol['volume_type_id']) vol_ref = self.volume_api.create(context, vol_size, - vol['snapshot_id'], + None, vol_name, vol['description'], - to_vsa_id=vsa_id, - drive_type_id=vol['drive_ref'].get('id'), + volume_type=vol_type, + metadata=dict(to_vsa_id=str(vsa_id)), availability_zone=availability_zone) except: self.update_vsa_status(context, vsa_id, @@ -249,7 +257,7 @@ class API(base.Base): rpc.cast(context, FLAGS.vsa_topic, {"method": "create_vsa", - "args": {"vsa_id": vsa_id}}) + "args": {"vsa_id": str(vsa_id)}}) return vsa_ref @@ -314,8 +322,7 @@ class API(base.Base): def _force_volume_delete(self, ctxt, volume): """Delete a volume, bypassing the check that it must be available.""" host = volume['host'] - if not host or volume['from_vsa_id']: - # Volume not yet assigned to host OR FE volume + if not host: # Deleting volume from database and skipping rpc. self.db.volume_destroy(ctxt, volume['id']) return @@ -328,9 +335,9 @@ class API(base.Base): def delete_vsa_volumes(self, context, vsa_id, direction, force_delete=True): if direction == "FE": - volumes = self.db.volume_get_all_assigned_from_vsa(context, vsa_id) + volumes = self.get_all_vsa_volumes(context, vsa_id) else: - volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) + volumes = self.get_all_vsa_drives(context, vsa_id) for volume in volumes: try: @@ -374,58 +381,25 @@ class API(base.Base): return self.db.vsa_get_all(context) return self.db.vsa_get_all_by_project(context, context.project_id) - def generate_user_data(self, context, vsa, volumes): - SubElement = ElementTree.SubElement - - e_vsa = ElementTree.Element("vsa") + def get_vsa_volume_type(self, context): + name = FLAGS.vsa_volume_type_name + try: + vol_type = volume_types.get_volume_type_by_name(context, name) + except exception.NotFound: + volume_types.create(context, name, + extra_specs=dict(type='vsa_volume')) + vol_type = volume_types.get_volume_type_by_name(context, name) - e_vsa_detail = SubElement(e_vsa, "id") - e_vsa_detail.text = str(vsa['id']) - e_vsa_detail = SubElement(e_vsa, "name") - e_vsa_detail.text = vsa['display_name'] - e_vsa_detail = SubElement(e_vsa, "description") - e_vsa_detail.text = vsa['display_description'] - e_vsa_detail = SubElement(e_vsa, "vc_count") - e_vsa_detail.text = str(vsa['vc_count']) + return vol_type - e_vsa_detail = SubElement(e_vsa, "auth_user") - e_vsa_detail.text = FLAGS.vsa_ec2_user_id - e_vsa_detail = SubElement(e_vsa, "auth_access_key") - e_vsa_detail.text = FLAGS.vsa_ec2_access_key + def get_all_vsa_instances(self, context, vsa_id): + return self.compute_api.get_all(context, + search_opts={'metadata': dict(vsa_id=str(vsa_id))}) - e_volumes = SubElement(e_vsa, "volumes") - for volume in volumes: + def get_all_vsa_volumes(self, context, vsa_id): + return self.volume_api.get_all(context, + search_opts={'metadata': dict(from_vsa_id=str(vsa_id))}) - loc = volume['provider_location'] - if loc is None: - ip = '' - iscsi_iqn = '' - iscsi_portal = '' - else: - (iscsi_target, _sep, 
iscsi_iqn) = loc.partition(" ") - (ip, iscsi_portal) = iscsi_target.split(":", 1) - - e_vol = SubElement(e_volumes, "volume") - e_vol_detail = SubElement(e_vol, "id") - e_vol_detail.text = str(volume['id']) - e_vol_detail = SubElement(e_vol, "name") - e_vol_detail.text = volume['name'] - e_vol_detail = SubElement(e_vol, "display_name") - e_vol_detail.text = volume['display_name'] - e_vol_detail = SubElement(e_vol, "size_gb") - e_vol_detail.text = str(volume['size']) - e_vol_detail = SubElement(e_vol, "status") - e_vol_detail.text = volume['status'] - e_vol_detail = SubElement(e_vol, "ip") - e_vol_detail.text = ip - e_vol_detail = SubElement(e_vol, "iscsi_iqn") - e_vol_detail.text = iscsi_iqn - e_vol_detail = SubElement(e_vol, "iscsi_portal") - e_vol_detail.text = iscsi_portal - e_vol_detail = SubElement(e_vol, "lun") - e_vol_detail.text = '0' - e_vol_detail = SubElement(e_vol, "sn_host") - e_vol_detail.text = volume['host'] - - _xml = ElementTree.tostring(e_vsa) - return base64.b64encode(_xml) + def get_all_vsa_drives(self, context, vsa_id): + return self.volume_api.get_all(context, + search_opts={'metadata': dict(to_vsa_id=str(vsa_id))}) diff --git a/nova/vsa/drive_types.py b/nova/vsa/drive_types.py deleted file mode 100644 index 3cdbbfb09..000000000 --- a/nova/vsa/drive_types.py +++ /dev/null @@ -1,114 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2011 Zadara Storage Inc. -# Copyright (c) 2011 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Handles all requests relating to Virtual Storage Arrays (VSAs). 
-""" - -from nova import db -from nova import exception -from nova import flags -from nova import log as logging - -FLAGS = flags.FLAGS -flags.DEFINE_string('drive_type_template_short', '%s_%sGB_%sRPM', - 'Template string for generation of drive type name') -flags.DEFINE_string('drive_type_template_long', '%s_%sGB_%sRPM_%s', - 'Template string for generation of drive type name') - - -LOG = logging.getLogger('nova.drive_types') - - -def _generate_default_drive_name(type, size_gb, rpm, capabilities): - if capabilities is None or capabilities == '': - return FLAGS.drive_type_template_short % \ - (type, str(size_gb), rpm) - else: - return FLAGS.drive_type_template_long % \ - (type, str(size_gb), rpm, capabilities) - - -def create(context, type, size_gb, rpm, capabilities='', - visible=True, name=None): - if name is None: - name = _generate_default_drive_name(type, size_gb, rpm, - capabilities) - LOG.debug(_("Creating drive type %(name)s: "\ - "%(type)s %(size_gb)s %(rpm)s %(capabilities)s"), locals()) - - values = { - 'type': type, - 'size_gb': size_gb, - 'rpm': rpm, - 'capabilities': capabilities, - 'visible': visible, - 'name': name - } - return db.drive_type_create(context, values) - - -def update(context, id, **kwargs): - - LOG.debug(_("Updating drive type with id %(id)s: %(kwargs)s"), locals()) - - updatable_fields = ['type', - 'size_gb', - 'rpm', - 'capabilities', - 'visible'] - changes = {} - for field in updatable_fields: - if field in kwargs and \ - kwargs[field] is not None and \ - kwargs[field] != '': - changes[field] = kwargs[field] - - # call update regadless if changes is empty or not - return db.drive_type_update(context, id, changes) - - -def rename(context, name, new_name=None): - - if new_name is None or \ - new_name == '': - disk = db.drive_type_get_by_name(context, name) - new_name = _generate_default_drive_name(disk['type'], - disk['size_gb'], disk['rpm'], disk['capabilities']) - - LOG.debug(_("Renaming drive type %(name)s to %(new_name)s"), locals()) - - values = dict(name=new_name) - dtype = db.drive_type_get_by_name(context, name) - return db.drive_type_update(context, dtype['id'], values) - - -def delete(context, id): - LOG.debug(_("Deleting drive type %d"), id) - db.drive_type_destroy(context, id) - - -def get(context, id): - return db.drive_type_get(context, id) - - -def get_by_name(context, name): - return db.drive_type_get_by_name(context, name) - - -def get_all(context, visible=True): - return db.drive_type_get_all(context, visible) diff --git a/nova/vsa/fake.py b/nova/vsa/fake.py index 0bb81484d..d4248ca01 100644 --- a/nova/vsa/fake.py +++ b/nova/vsa/fake.py @@ -16,7 +16,7 @@ # under the License. -class FakeVcConnection: +class FakeVcConnection(object): def init_host(self, host): pass diff --git a/nova/vsa/manager.py b/nova/vsa/manager.py index 0f1718d38..d4c414106 100644 --- a/nova/vsa/manager.py +++ b/nova/vsa/manager.py @@ -22,17 +22,17 @@ Handles all processes relating to Virtual Storage Arrays (VSA). 
""" +from nova import compute +from nova import exception +from nova import flags from nova import log as logging from nova import manager -from nova import flags -from nova import utils -from nova import exception -from nova import compute from nova import volume from nova import vsa -from nova.vsa.api import VsaState +from nova import utils from nova.compute import instance_types - +from nova.vsa import utils as vsa_utils +from nova.vsa.api import VsaState FLAGS = flags.FLAGS flags.DEFINE_string('vsa_driver', 'nova.vsa.connection.get_connection', @@ -83,18 +83,18 @@ class VsaManager(manager.SchedulerDependentManager): @exception.wrap_exception() def vsa_volume_created(self, context, vol_id, vsa_id, status): """Callback for volume creations""" - LOG.debug(_("VSA ID %(vsa_id)s: Volume %(vol_id)s created. "\ + LOG.debug(_("VSA ID %(vsa_id)s: Drive %(vol_id)s created. "\ "Status %(status)s"), locals()) vsa_id = int(vsa_id) # just in case # Get all volumes for this VSA # check if any of them still in creating phase - volumes = self.db.volume_get_all_assigned_to_vsa(context, vsa_id) - for volume in volumes: - if volume['status'] == 'creating': - vol_name = volume['name'] - vol_disp_name = volume['display_name'] - LOG.debug(_("Volume %(vol_name)s (%(vol_disp_name)s) still "\ + drives = self.vsa_api.get_all_vsa_drives(context, vsa_id) + for drive in drives: + if drive['status'] == 'creating': + vol_name = drive['name'] + vol_disp_name = drive['display_name'] + LOG.debug(_("Drive %(vol_name)s (%(vol_disp_name)s) still "\ "in creating phase - wait"), locals()) return @@ -105,17 +105,17 @@ class VsaManager(manager.SchedulerDependentManager): LOG.exception(msg) return - if len(volumes) != vsa['vol_count']: - cvol_real = len(volumes) + if len(drives) != vsa['vol_count']: + cvol_real = len(drives) cvol_exp = vsa['vol_count'] LOG.debug(_("VSA ID %(vsa_id)d: Not all volumes are created "\ "(%(cvol_real)d of %(cvol_exp)d)"), locals()) return # all volumes created (successfully or not) - return self._start_vcs(context, vsa, volumes) + return self._start_vcs(context, vsa, drives) - def _start_vcs(self, context, vsa, volumes=[]): + def _start_vcs(self, context, vsa, drives=[]): """Start VCs for VSA """ vsa_id = vsa['id'] @@ -127,11 +127,11 @@ class VsaManager(manager.SchedulerDependentManager): # in _separate_ loop go over all volumes and mark as "attached" has_failed_volumes = False - for volume in volumes: - vol_name = volume['name'] - vol_disp_name = volume['display_name'] - status = volume['status'] - LOG.info(_("VSA ID %(vsa_id)d: Volume %(vol_name)s "\ + for drive in drives: + vol_name = drive['name'] + vol_disp_name = drive['display_name'] + status = drive['status'] + LOG.info(_("VSA ID %(vsa_id)d: Drive %(vol_name)s "\ "(%(vol_disp_name)s) is in %(status)s state"), locals()) if status == 'available': @@ -149,11 +149,12 @@ class VsaManager(manager.SchedulerDependentManager): if has_failed_volumes: LOG.info(_("VSA ID %(vsa_id)d: Delete all BE volumes"), locals()) self.vsa_api.delete_vsa_volumes(context, vsa_id, "BE", True) - self.vsa_api.update_vsa_status(context, vsa_id, VsaState.FAILED) + self.vsa_api.update_vsa_status(context, vsa_id, + VsaState.FAILED) return # create user-data record for VC - storage_data = self.vsa_api.generate_user_data(context, vsa, volumes) + storage_data = vsa_utils.generate_user_data(vsa, drives) instance_type = instance_types.get_instance_type( vsa['instance_type_id']) @@ -174,4 +175,5 @@ class VsaManager(manager.SchedulerDependentManager): user_data=storage_data, 
metadata=dict(vsa_id=str(vsa_id))) - self.vsa_api.update_vsa_status(context, vsa_id, VsaState.CREATED) + self.vsa_api.update_vsa_status(context, vsa_id, + VsaState.CREATED) diff --git a/nova/vsa/utils.py b/nova/vsa/utils.py new file mode 100644 index 000000000..1de341ac5 --- /dev/null +++ b/nova/vsa/utils.py @@ -0,0 +1,80 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Zadara Storage Inc. +# Copyright (c) 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +from xml.etree import ElementTree + +from nova import flags + +FLAGS = flags.FLAGS + + +def generate_user_data(vsa, volumes): + SubElement = ElementTree.SubElement + + e_vsa = ElementTree.Element("vsa") + + e_vsa_detail = SubElement(e_vsa, "id") + e_vsa_detail.text = str(vsa['id']) + e_vsa_detail = SubElement(e_vsa, "name") + e_vsa_detail.text = vsa['display_name'] + e_vsa_detail = SubElement(e_vsa, "description") + e_vsa_detail.text = vsa['display_description'] + e_vsa_detail = SubElement(e_vsa, "vc_count") + e_vsa_detail.text = str(vsa['vc_count']) + + e_vsa_detail = SubElement(e_vsa, "auth_user") + e_vsa_detail.text = FLAGS.vsa_ec2_user_id + e_vsa_detail = SubElement(e_vsa, "auth_access_key") + e_vsa_detail.text = FLAGS.vsa_ec2_access_key + + e_volumes = SubElement(e_vsa, "volumes") + for volume in volumes: + + loc = volume['provider_location'] + if loc is None: + ip = '' + iscsi_iqn = '' + iscsi_portal = '' + else: + (iscsi_target, _sep, iscsi_iqn) = loc.partition(" ") + (ip, iscsi_portal) = iscsi_target.split(":", 1) + + e_vol = SubElement(e_volumes, "volume") + e_vol_detail = SubElement(e_vol, "id") + e_vol_detail.text = str(volume['id']) + e_vol_detail = SubElement(e_vol, "name") + e_vol_detail.text = volume['name'] + e_vol_detail = SubElement(e_vol, "display_name") + e_vol_detail.text = volume['display_name'] + e_vol_detail = SubElement(e_vol, "size_gb") + e_vol_detail.text = str(volume['size']) + e_vol_detail = SubElement(e_vol, "status") + e_vol_detail.text = volume['status'] + e_vol_detail = SubElement(e_vol, "ip") + e_vol_detail.text = ip + e_vol_detail = SubElement(e_vol, "iscsi_iqn") + e_vol_detail.text = iscsi_iqn + e_vol_detail = SubElement(e_vol, "iscsi_portal") + e_vol_detail.text = iscsi_portal + e_vol_detail = SubElement(e_vol, "lun") + e_vol_detail.text = '0' + e_vol_detail = SubElement(e_vol, "sn_host") + e_vol_detail.text = volume['host'] + + _xml = ElementTree.tostring(e_vsa) + return base64.b64encode(_xml) -- cgit From 59e9adb8e2ef39474a04ead76975a1fc3f913550 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 25 Aug 2011 19:09:50 -0700 Subject: cosmetic cleanup --- nova/api/openstack/contrib/virtual_storage_arrays.py | 6 +++--- nova/api/openstack/contrib/volumes.py | 14 +++++++------- nova/log.py | 6 +----- nova/tests/scheduler/test_vsa_scheduler.py | 1 - nova/tests/test_vsa.py | 1 - nova/tests/test_vsa_volumes.py | 2 -- nova/tests/test_xenapi.py | 1 - 7 files changed, 11 insertions(+), 20 deletions(-) diff --git 
a/nova/api/openstack/contrib/virtual_storage_arrays.py b/nova/api/openstack/contrib/virtual_storage_arrays.py index f3e4fc849..e09736a28 100644 --- a/nova/api/openstack/contrib/virtual_storage_arrays.py +++ b/nova/api/openstack/contrib/virtual_storage_arrays.py @@ -260,9 +260,9 @@ class VsaVolumeDriveController(volumes.VolumeController): def _translation(self, context, vol, vsa_id, details): if details: - translation = volumes.translate_volume_detail_view + translation = volumes._translate_volume_detail_view else: - translation = volumes.translate_volume_summary_view + translation = volumes._translate_volume_summary_view d = translation(context, vol) d['vsaId'] = vsa_id @@ -559,7 +559,7 @@ class Virtual_storage_arrays(extensions.ExtensionDescriptor): return "http://docs.openstack.org/ext/vsa/api/v1.1" def get_updated(self): - return "2011-06-29T00:00:00+00:00" + return "2011-08-25T00:00:00+00:00" def get_resources(self): resources = [] diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 8c3898867..d62225e58 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -37,17 +37,17 @@ LOG = logging.getLogger("nova.api.volumes") FLAGS = flags.FLAGS -def translate_volume_detail_view(context, vol): +def _translate_volume_detail_view(context, vol): """Maps keys for volumes details view.""" - d = translate_volume_summary_view(context, vol) + d = _translate_volume_summary_view(context, vol) # No additional data / lookups at the moment return d -def translate_volume_summary_view(context, vol): +def _translate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} @@ -114,7 +114,7 @@ class VolumeController(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - return {'volume': translate_volume_detail_view(context, vol)} + return {'volume': _translate_volume_detail_view(context, vol)} def delete(self, req, id): """Delete a volume.""" @@ -130,11 +130,11 @@ class VolumeController(object): def index(self, req): """Returns a summary list of volumes.""" - return self._items(req, entity_maker=translate_volume_summary_view) + return self._items(req, entity_maker=_translate_volume_summary_view) def detail(self, req): """Returns a detailed list of volumes.""" - return self._items(req, entity_maker=translate_volume_detail_view) + return self._items(req, entity_maker=_translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" @@ -175,7 +175,7 @@ class VolumeController(object): # Work around problem that instance is lazy-loaded... 
new_volume = self.volume_api.get(context, new_volume['id']) - retval = translate_volume_detail_view(context, new_volume) + retval = _translate_volume_detail_view(context, new_volume) return {'volume': retval} diff --git a/nova/log.py b/nova/log.py index eb0b6020f..222b8c5fb 100644 --- a/nova/log.py +++ b/nova/log.py @@ -32,7 +32,6 @@ import json import logging import logging.handlers import os -import stat import sys import traceback @@ -258,10 +257,7 @@ class NovaRootLogger(NovaLogger): self.filelog = WatchedFileHandler(logpath) self.addHandler(self.filelog) self.logpath = logpath - - st = os.stat(self.logpath) - if st.st_mode != (stat.S_IFREG | FLAGS.logfile_mode): - os.chmod(self.logpath, FLAGS.logfile_mode) + os.chmod(self.logpath, FLAGS.logfile_mode) else: self.removeHandler(self.filelog) self.addHandler(self.streamlog) diff --git a/nova/tests/scheduler/test_vsa_scheduler.py b/nova/tests/scheduler/test_vsa_scheduler.py index 309db96a2..37964f00d 100644 --- a/nova/tests/scheduler/test_vsa_scheduler.py +++ b/nova/tests/scheduler/test_vsa_scheduler.py @@ -210,7 +210,6 @@ class VsaSchedulerTestCase(test.TestCase): def setUp(self, sched_class=None): super(VsaSchedulerTestCase, self).setUp() self.stubs = stubout.StubOutForTesting() - self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() if sched_class is None: diff --git a/nova/tests/test_vsa.py b/nova/tests/test_vsa.py index 300a4d71c..3d2d2de13 100644 --- a/nova/tests/test_vsa.py +++ b/nova/tests/test_vsa.py @@ -47,7 +47,6 @@ class VsaTestCase(test.TestCase): FLAGS.quota_volumes = 100 FLAGS.quota_gigabytes = 10000 - self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() volume_types.create(self.context, diff --git a/nova/tests/test_vsa_volumes.py b/nova/tests/test_vsa_volumes.py index 43173d86a..b7cd4e840 100644 --- a/nova/tests/test_vsa_volumes.py +++ b/nova/tests/test_vsa_volumes.py @@ -36,8 +36,6 @@ class VsaVolumesTestCase(test.TestCase): self.stubs = stubout.StubOutForTesting() self.vsa_api = vsa.API() self.volume_api = volume.API() - - self.context_non_admin = context.RequestContext(None, None) self.context = context.get_admin_context() self.default_vol_type = self.vsa_api.get_vsa_volume_type(self.context) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 6d1958401..2f0559366 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -203,7 +203,6 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) - @test.skip_test("Skip this test meanwhile") def test_parallel_builds(self): stubs.stubout_loopingcall_delay(self.stubs) -- cgit From eecb6ce2acee168713177459942e405b099fb25a Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Thu, 25 Aug 2011 19:47:12 -0700 Subject: driver: added vsa_id parameter for SN call --- nova/volume/driver.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 7a02a7c14..35e3ea8d0 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -850,23 +850,25 @@ class ZadaraBEDriver(ISCSIDriver): # Set the qos-str to default type sas qosstr = 'SAS_1000' - LOG.debug(_("\tvolume_type_id=%s"), volume['volume_type_id']) - volume_type = volume_types.get_volume_type(None, volume['volume_type_id']) - - LOG.debug(_("\tvolume_type=%s"), volume_type) - if volume_type is not None: qosstr = 
volume_type['extra_specs']['drive_type'] + \
                     ("_%s" % volume_type['extra_specs']['drive_size'])

+        vsa_id = None
+        for i in volume.get('volume_metadata'):
+            if i['key'] == 'to_vsa_id':
+                vsa_id = i['value']
+                break
+
         try:
             self._sync_exec('/var/lib/zadara/bin/zadara_sncfg',
                             'create_qospart',
                             '--qos', qosstr,
                             '--pname', volume['name'],
                             '--psize', sizestr,
+                            '--vsaid', vsa_id,
                             run_as_root=True,
                             check_exit_code=0)
         except exception.ProcessExecutionError:
--
cgit

From 5dc7956eed749c33b6cfaaaf122e829feec62ea9 Mon Sep 17 00:00:00 2001
From: Dan Prince
Date: Fri, 26 Aug 2011 09:54:53 -0400
Subject: Update compute API and manager so that the image_ref is set before
 spawning the rebuilt instance. Fixes issue where rebuild didn't actually
 change the image_id.

---
 nova/compute/api.py                   |  3 +--
 nova/compute/manager.py               | 14 +++-----------
 nova/tests/integrated/test_servers.py |  3 ++-
 3 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 60a13631a..3b4bde8ea 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1035,7 +1035,7 @@ class API(base.Base):
         files_to_inject = files_to_inject or []
         self._check_injected_file_quota(context, files_to_inject)

-        values = {}
+        values = {"image_ref": image_href}
         if metadata is not None:
             self._check_metadata_properties_quota(context, metadata)
             values['metadata'] = metadata
@@ -1045,7 +1045,6 @@ class API(base.Base):

         rebuild_params = {
             "new_pass": admin_password,
-            "image_ref": image_href,
             "injected_files": files_to_inject,
         }

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index ade15e310..6fcb3786c 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -201,11 +201,6 @@ class ComputeManager(manager.SchedulerDependentManager):
         data = {'launched_at': launched_at or utils.utcnow()}
         self.db.instance_update(context, instance_id, data)

-    def _update_image_ref(self, context, instance_id, image_ref):
-        """Update the image_id for the given instance."""
-        data = {'image_ref': image_ref}
-        self.db.instance_update(context, instance_id, data)
-
     def get_console_topic(self, context, **kwargs):
         """Retrieves the console host for a project on this host.
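(Aside, not part of Dan Prince's patch: the essence of the fix is ordering. image_ref now rides in the same pre-spawn DB update as the metadata, so the record the manager spawns from already points at the new image; the post-spawn _update_image_ref step becomes unnecessary. A toy sketch of that ordering, with a plain dict standing in for the instances table; every name below is illustrative, not nova's real API.)

# Toy stand-in for the instances table; keys and values are made up.
instances = {1: {'image_ref': '2', 'metadata': {}}}


def instance_update(instance_id, values):
    instances[instance_id].update(values)


def rebuild(instance_id, image_href, metadata=None):
    # The fix in a nutshell: set image_ref up front, in the same
    # update that records metadata, instead of patching it in after
    # the instance has already been spawned from stale data.
    values = {'image_ref': image_href}
    if metadata is not None:
        values['metadata'] = metadata
    instance_update(instance_id, values)
    return instances[instance_id]  # spawn would now see the new image


print(rebuild(1, '3'))  # {'image_ref': '3', 'metadata': {}}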
@@ -526,7 +521,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param context: `nova.RequestContext` object :param instance_id: Instance identifier (integer) - :param image_ref: Image identifier (href or integer) + :param injected_files: Files to inject :param new_pass: password to set on rebuilt instance """ context = context.elevated() @@ -539,8 +534,6 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self._get_instance_nw_info(context, instance_ref) self.driver.destroy(instance_ref, network_info) - image_ref = kwargs.get('image_ref') - instance_ref.image_ref = image_ref instance_ref.injected_files = kwargs.get('injected_files', []) network_info = self.network_api.get_instance_nw_info(context, instance_ref) @@ -552,11 +545,10 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.spawn(context, instance_ref, network_info, bd_mapping) - self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) - usage_info = utils.usage_from_instance(instance_ref, - image_ref=image_ref) + usage_info = utils.usage_from_instance(instance_ref) + notifier.notify('compute.%s' % self.host, 'compute.instance.rebuild', notifier.INFO, diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index c2f800689..b9382038a 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -193,7 +193,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # rebuild the server with metadata post = {} post['rebuild'] = { - "imageRef": "https://localhost/v1.1/32278/images/2", + "imageRef": "https://localhost/v1.1/32278/images/3", "name": "blah", } @@ -205,6 +205,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase): self.assertEqual(created_server_id, found_server['id']) self.assertEqual({}, found_server.get('metadata')) self.assertEqual('blah', found_server.get('name')) + self.assertEqual('3', found_server.get('image')['id']) # Cleanup self._delete_server(created_server_id) -- cgit From 96a1b218d1d1d24853df3eceff11ba7676cd48ae Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 26 Aug 2011 11:14:44 -0700 Subject: added debug prints for scheduler --- nova/scheduler/vsa.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py index ad5ebc2dc..6962dd86b 100644 --- a/nova/scheduler/vsa.py +++ b/nova/scheduler/vsa.py @@ -272,7 +272,7 @@ class VsaScheduler(simple.SimpleScheduler): self._consume_resource(qos_cap, vol['size'], -1) def schedule_create_volumes(self, context, request_spec, - availability_zone, *_args, **_kwargs): + availability_zone=None, *_args, **_kwargs): """Picks hosts for hosting multiple volumes.""" num_volumes = request_spec.get('num_volumes') @@ -285,6 +285,8 @@ class VsaScheduler(simple.SimpleScheduler): host = self._check_host_enforcement(context, availability_zone) try: + self._print_capabilities_info() + self._assign_hosts_to_volumes(context, volume_params, host) for vol in volume_params: @@ -324,6 +326,8 @@ class VsaScheduler(simple.SimpleScheduler): return super(VsaScheduler, self).schedule_create_volume(context, volume_id, *_args, **_kwargs) + self._print_capabilities_info() + drive_type = { 'name': volume_type['extra_specs'].get('drive_name'), 'type': volume_type['extra_specs'].get('drive_type'), @@ -398,6 +402,26 @@ class VsaScheduler(simple.SimpleScheduler): 
self._consume_partition(qos_values, GB_TO_BYTES(size), direction) return + def _print_capabilities_info(self): + host_list = self._get_service_states().iteritems() + for host, host_dict in host_list: + for service_name, service_dict in host_dict.iteritems(): + if service_name != "volume": + continue + + LOG.info(_("Host %s:"), host) + + gos_info = service_dict.get('drive_qos_info', {}) + for qosgrp, qos_values in gos_info.iteritems(): + total = qos_values['TotalDrives'] + used = qos_values['FullDrive']['NumOccupiedDrives'] + free = qos_values['FullDrive']['NumFreeDrives'] + avail = BYTES_TO_GB(qos_values['AvailableCapacity']) + + LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "\ + "used %(used)2s, free %(free)2s. Available "\ + "capacity %(avail)-5s"), locals()) + class VsaSchedulerLeastUsedHost(VsaScheduler): """ -- cgit From b846d22937ac62549832e16105ed06a21a3e34d0 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Fri, 26 Aug 2011 15:36:33 -0400 Subject: Tiny tweaks to the migration script. --- .../versions/043_update_instance_states.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py index 65bdf601d..e58ae5362 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py @@ -33,35 +33,35 @@ c_task_state = Column('task_state', _upgrade_translations = { "stopping": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.STOPPING, }, "stopped": { - "vm_state": vm_states.STOPPED, + "state_description": vm_states.STOPPED, "task_state": None, }, "terminated": { - "vm_state": vm_states.DELETED, + "state_description": vm_states.DELETED, "task_state": None, }, "terminating": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": task_states.DELETING, }, "running": { - "vm_state": vm_states.ACTIVE, + "state_description": vm_states.ACTIVE, "task_state": None, }, "scheduling": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, "migrating": { - "vm_state": vm_states.MIGRATING, + "state_description": vm_states.MIGRATING, "task_state": None, }, "pending": { - "vm_state": vm_states.BUILDING, + "state_description": vm_states.BUILDING, "task_state": task_states.SCHEDULING, }, } @@ -106,9 +106,6 @@ def upgrade(migrate_engine): instance_table.create_column(c_task_state) - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - for old_state, values in _upgrade_translations.iteritems(): instance_table.update().\ values(**values).\ @@ -135,7 +132,7 @@ def downgrade(migrate_engine): instance_table.update().\ where(c_task_state == old_task_state).\ where(c_vm_state == old_vm_state).\ - values(state_description=new_state_desc).\ + values(vm_state=new_state_desc).\ execute() instance_table.drop_column('task_state') -- cgit From 6f467a94e3f7bdab41ebdcb7b987ca5544bfe321 Mon Sep 17 00:00:00 2001 From: "vladimir.p" Date: Fri, 26 Aug 2011 13:55:43 -0700 Subject: removed create_volumes, added log & doc comment about experimental code --- nova/volume/manager.py | 4 ---- nova/vsa/api.py | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 63656d485..caa5298d4 100644 --- 
a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -93,10 +93,6 @@ class VolumeManager(manager.SchedulerDependentManager):
         else:
             LOG.info(_("volume %s: skipping export"), volume['name'])

-    def create_volumes(self, context, request_spec, availability_zone):
-        LOG.info(_("create_volumes called with req=%(request_spec)s, "\
-                   "availability_zone=%(availability_zone)s"), locals())
-
     def create_volume(self, context, volume_id, snapshot_id=None):
         """Creates and exports the volume."""
         context = context.elevated()
diff --git a/nova/vsa/api.py b/nova/vsa/api.py
index b279255d7..18cf13705 100644
--- a/nova/vsa/api.py
+++ b/nova/vsa/api.py
@@ -17,6 +17,10 @@

 """
 Handles all requests relating to Virtual Storage Arrays (VSAs).
+
+Experimental code. Requires a special VSA image.
+For assistance and guidelines please contact
+    Zadara Storage Inc & OpenStack community
 """

 import sys
@@ -142,6 +146,8 @@ class API(base.Base):

         For shared storage disks split into partitions
         """
+        LOG.info(_("*** Experimental VSA code ***"))
+
         if vc_count > FLAGS.max_vcs_in_vsa:
             LOG.warning(_("Requested number of VCs (%d) is too high."\
                           " Setting to default"), vc_count)
--
cgit

From 209334e4740087aa0fd4b1aac8fcaf1a74ff7220 Mon Sep 17 00:00:00 2001
From: "vladimir.p"
Date: Fri, 26 Aug 2011 15:07:34 -0700
Subject: changed format string in nova-manage

---
 bin/nova-manage | 152 +++++++++++++++++++++++++++++---------------------------
 1 file changed, 78 insertions(+), 74 deletions(-)

diff --git a/bin/nova-manage b/bin/nova-manage
index 76e188596..c9cf4266d 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -1108,67 +1108,71 @@ class VsaCommands(object):
         self.vsa_api = vsa.API()
         self.context = context.get_admin_context()

-        self._format_str_vsa = "%-5s %-15s %-25s %-10s %-6s "\
-                               "%-9s %-10s %-10s %10s"
-        self._format_str_volume = "\t%-4s %-15s %-5s %-10s %-20s %s"
-        self._format_str_drive = "\t%-4s %-15s %-5s %-10s %-20s %-4s %-10s %s"
-        self._format_str_instance = "\t%-4s %-10s %-20s %-12s %-10s "\
-                                    "%-15s %-15s %-10s %-15s %s"
+        self._format_str_vsa = "%(id)-5s %(vsa_id)-15s %(name)-25s "\
+            "%(type)-10s %(vcs)-6s %(drives)-9s %(stat)-10s "\
+            "%(az)-10s %(time)-10s"
+        self._format_str_volume = "\t%(id)-4s %(name)-15s %(size)-5s "\
+            "%(stat)-10s %(att)-20s %(time)s"
+        self._format_str_drive = "\t%(id)-4s %(name)-15s %(size)-5s "\
+            "%(stat)-10s %(host)-20s %(type)-4s %(tname)-10s %(time)s"
+        self._format_str_instance = "\t%(id)-4s %(name)-10s %(dname)-20s "\
+            "%(image)-12s %(type)-10s %(fl_ip)-15s %(fx_ip)-15s "\
+            "%(stat)-10s %(host)-15s %(time)s"

     def _print_vsa_header(self):
         print self._format_str_vsa %\
-            (_('ID'),
-             _('vsa_id'),
-             _('displayName'),
-             _('vc_type'),
-             _('vc_cnt'),
-             _('drive_cnt'),
-             _('status'),
-             _('AZ'),
-             _('createTime'))
+            dict(id=_('ID'),
+                 vsa_id=_('vsa_id'),
+                 name=_('displayName'),
+                 type=_('vc_type'),
+                 vcs=_('vc_cnt'),
+                 drives=_('drive_cnt'),
+                 stat=_('status'),
+                 az=_('AZ'),
+                 time=_('createTime'))

     def _print_vsa(self, vsa):
         print self._format_str_vsa %\
-            (vsa['id'],
-             vsa['name'],
-             vsa['display_name'],
-             vsa['vsa_instance_type'].get('name', None),
-             vsa['vc_count'],
-             vsa['vol_count'],
-             vsa['status'],
-             vsa['availability_zone'],
-             str(vsa['created_at']))
+            dict(id=vsa['id'],
+                 vsa_id=vsa['name'],
+                 name=vsa['display_name'],
+                 type=vsa['vsa_instance_type'].get('name', None),
+                 vcs=vsa['vc_count'],
+                 drives=vsa['vol_count'],
+                 stat=vsa['status'],
+                 az=vsa['availability_zone'],
+                 time=str(vsa['created_at']))

     def _print_volume_header(self):
         print _(' === Volumes ===')
         print self._format_str_volume %\
-            (_('ID'),
-             _('name'),
-             _('size'),
-             _('status'),
-             _('attachment'),
-             _('createTime'))
+            dict(id=_('ID'),
+                 name=_('name'),
+                 size=_('size'),
+                 stat=_('status'),
+                 att=_('attachment'),
+                 time=_('createTime'))

     def _print_volume(self, vol):
         print self._format_str_volume %\
-            (vol['id'],
-             vol['display_name'] or vol['name'],
-             vol['size'],
-             vol['status'],
-             vol['attach_status'],
-             str(vol['created_at']))
+            dict(id=vol['id'],
+                 name=vol['display_name'] or vol['name'],
+                 size=vol['size'],
+                 stat=vol['status'],
+                 att=vol['attach_status'],
+                 time=str(vol['created_at']))

     def _print_drive_header(self):
         print _(' === Drives ===')
         print self._format_str_drive %\
-            (_('ID'),
-             _('name'),
-             _('size'),
-             _('status'),
-             _('host'),
-             _('type'),
-             _('typeName'),
-             _('createTime'))
+            dict(id=_('ID'),
+                 name=_('name'),
+                 size=_('size'),
+                 stat=_('status'),
+                 host=_('host'),
+                 type=_('type'),
+                 tname=_('typeName'),
+                 time=_('createTime'))

     def _print_drive(self, drive):
         if drive['volume_type_id'] is not None and drive.get('volume_type'):
@@ -1177,28 +1181,28 @@ class VsaCommands(object):
             drive_type_name = ''

         print self._format_str_drive %\
-            (drive['id'],
-             drive['display_name'],
-             drive['size'],
-             drive['status'],
-             drive['host'],
-             drive['volume_type_id'],
-             drive_type_name,
-             str(drive['created_at']))
+            dict(id=drive['id'],
+                 name=drive['display_name'],
+                 size=drive['size'],
+                 stat=drive['status'],
+                 host=drive['host'],
+                 type=drive['volume_type_id'],
+                 tname=drive_type_name,
+                 time=str(drive['created_at']))

     def _print_instance_header(self):
         print _(' === Instances ===')
         print self._format_str_instance %\
-            (_('ID'),
-             _('name'),
-             _('disp_name'),
-             _('image'),
-             _('type'),
-             _('floating_IP'),
-             _('fixed_IP'),
-             _('status'),
-             _('host'),
-             _('createTime'))
+            dict(id=_('ID'),
+                 name=_('name'),
+                 dname=_('disp_name'),
+                 image=_('image'),
+                 type=_('type'),
+                 fl_ip=_('floating_IP'),
+                 fx_ip=_('fixed_IP'),
+                 stat=_('status'),
+                 host=_('host'),
+                 time=_('createTime'))

     def _print_instance(self, vc):

@@ -1212,16 +1216,16 @@ class VsaCommands(object):
         floating_addr = floating_addr or fixed_addr

         print self._format_str_instance %\
-            (vc['id'],
-             ec2utils.id_to_ec2_id(vc['id']),
-             vc['display_name'],
-             ('ami-%08x' % int(vc['image_ref'])),
-             vc['instance_type']['name'],
-             floating_addr,
-             fixed_addr,
-             vc['state_description'],
-             vc['host'],
-             str(vc['created_at']))
+            dict(id=vc['id'],
+                 name=ec2utils.id_to_ec2_id(vc['id']),
+                 dname=vc['display_name'],
+                 image=('ami-%08x' % int(vc['image_ref'])),
+                 type=vc['instance_type']['name'],
+                 fl_ip=floating_addr,
+                 fx_ip=fixed_addr,
+                 stat=vc['state_description'],
+                 host=vc['host'],
+                 time=str(vc['created_at']))

     def _list(self, context, vsas, print_drives=False,
               print_volumes=False, print_instances=False):
@@ -1283,7 +1287,7 @@ class VsaCommands(object):
         try:
             project_id = os.getenv("EC2_ACCESS_KEY").split(':')[1]
         except Exception as exc:
-            print _("Failed to retrieve project id: %(exc)s") % locals()
+            print _("Failed to retrieve project id: %(exc)s") % {'exc': exc}
             raise

         if user_id is None:
             try:
                 project = self.manager.get_project(project_id)
                 user_id = project.project_manager_id
             except Exception as exc:
-                print _("Failed to retrieve user info: %(exc)s") % locals()
+                print _("Failed to retrieve user info: %(exc)s") % {'exc': exc}
                 raise

         is_admin = self.manager.is_admin(user_id)
--
cgit

From 8bd8103c86fc021ff86b923883b66371052b3f93 Mon Sep 17 00:00:00 2001
From: Trey Morris
Date: Fri, 26 Aug 2011 17:32:44 -0500
Subject: double quotes to single

---
 nova/network/manager.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/nova/network/manager.py b/nova/network/manager.py
index 404a3180e..b4605eea5 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -484,17 +484,17 @@ class NetworkManager(manager.SchedulerDependentManager):
         # TODO(tr3buchet) eventually "enabled" should be determined
         def ip_dict(ip):
             return {
-                "ip": ip,
-                "netmask": network["netmask"],
-                "enabled": "1"}
+                'ip': ip,
+                'netmask': network['netmask'],
+                'enabled': '1'}

         def ip6_dict():
             return {
-                "ip": ipv6.to_global(network['cidr_v6'],
+                'ip': ipv6.to_global(network['cidr_v6'],
                                      vif['address'],
                                      network['project_id']),
-                "netmask": network['netmask_v6'],
-                "enabled": "1"}
+                'netmask': network['netmask_v6'],
+                'enabled': '1'}

         network_dict = {
             'bridge': network['bridge'],
             'id': network['id'],
--
cgit

From 8b44cedcc099542e6485a33764cece4c141fd4ab Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Fri, 26 Aug 2011 15:40:04 -0700
Subject: start of kombu implementation, keeping the same RPC interfaces

---
 nova/rpc/__init__.py    |  25 +-
 nova/rpc/amqp.py        | 593 ----------------------------------------
 nova/rpc/impl_carrot.py | 607 ++++++++++++++++++++++++++++++++++++++++++
 nova/rpc/impl_kombu.py  | 426 +++++++++++++++++++++++++++++
 4 files changed, 1045 insertions(+), 606 deletions(-)
 delete mode 100644 nova/rpc/amqp.py
 create mode 100644 nova/rpc/impl_carrot.py
 create mode 100644 nova/rpc/impl_kombu.py

diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py
index bdf7f705b..f102cf0fa 100644
--- a/nova/rpc/__init__.py
+++ b/nova/rpc/__init__.py
@@ -23,10 +23,18 @@ from nova import flags

 FLAGS = flags.FLAGS
 flags.DEFINE_string('rpc_backend',
-                    'nova.rpc.amqp',
-                    "The messaging module to use, defaults to AMQP.")
+                    'carrot',
+                    "The messaging module to use, defaults to carrot.")

-RPCIMPL = import_object(FLAGS.rpc_backend)
+impl_table = {'kombu': 'nova.rpc.impl_kombu',
+              'amqp': 'nova.rpc.impl_kombu',
+              'carrot': 'nova.rpc.impl_carrot'}
+
+
+# rpc_backend can be a short name like 'kombu', or it can be the full
+# module name
+RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend,
+                                       FLAGS.rpc_backend))


 def create_connection(new=True):
@@ -34,16 +42,7 @@ def create_connection(new=True):


 def create_consumer(conn, topic, proxy, fanout=False):
-    if fanout:
-        return RPCIMPL.FanoutAdapterConsumer(
-            connection=conn,
-            topic=topic,
-            proxy=proxy)
-    else:
-        return RPCIMPL.TopicAdapterConsumer(
-            connection=conn,
-            topic=topic,
-            proxy=proxy)
+    return RPCIMPL.create_consumer(conn, topic, proxy, fanout)


 def create_consumer_set(conn, consumers):
diff --git a/nova/rpc/amqp.py b/nova/rpc/amqp.py
deleted file mode 100644
index fe429b266..000000000
--- a/nova/rpc/amqp.py
+++ /dev/null
@@ -1,593 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""AMQP-based RPC. - -Queues have consumers and publishers. - -No fan-out support yet. - -""" - -import json -import sys -import time -import traceback -import types -import uuid - -from carrot import connection as carrot_connection -from carrot import messaging -from eventlet import greenpool -from eventlet import pools -from eventlet import queue -import greenlet - -from nova import context -from nova import exception -from nova import fakerabbit -from nova import flags -from nova import log as logging -from nova import utils -from nova.rpc.common import RemoteError, LOG - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') - - -class Connection(carrot_connection.BrokerConnection): - """Connection instance object.""" - - @classmethod - def instance(cls, new=True): - """Returns the instance.""" - if new or not hasattr(cls, '_instance'): - params = dict(hostname=FLAGS.rabbit_host, - port=FLAGS.rabbit_port, - ssl=FLAGS.rabbit_use_ssl, - userid=FLAGS.rabbit_userid, - password=FLAGS.rabbit_password, - virtual_host=FLAGS.rabbit_virtual_host) - - if FLAGS.fake_rabbit: - params['backend_cls'] = fakerabbit.Backend - - # NOTE(vish): magic is fun! - # pylint: disable=W0142 - if new: - return cls(**params) - else: - cls._instance = cls(**params) - return cls._instance - - @classmethod - def recreate(cls): - """Recreates the connection instance. - - This is necessary to recover from some network errors/disconnects. - - """ - try: - del cls._instance - except AttributeError, e: - # The _instance stuff is for testing purposes. Usually we don't use - # it. So don't freak out if it doesn't exist. - pass - return cls.instance() - - -class Pool(pools.Pool): - """Class that implements a Pool of Connections.""" - - # TODO(comstud): Timeout connections not used in a while - def create(self): - LOG.debug('Creating new connection') - return Connection.instance(new=True) - -# Create a ConnectionPool to use for RPC calls. We'll order the -# pool as a stack (LIFO), so that we can potentially loop through and -# timeout old unused connections at some point -ConnectionPool = Pool( - max_size=FLAGS.rpc_conn_pool_size, - order_as_stack=True) - - -class Consumer(messaging.Consumer): - """Consumer base class. - - Contains methods for connecting the fetch method to async loops. - - """ - - def __init__(self, *args, **kwargs): - for i in xrange(FLAGS.rabbit_max_retries): - if i > 0: - time.sleep(FLAGS.rabbit_retry_interval) - try: - super(Consumer, self).__init__(*args, **kwargs) - self.failed_connection = False - break - except Exception as e: # Catching all because carrot sucks - fl_host = FLAGS.rabbit_host - fl_port = FLAGS.rabbit_port - fl_intv = FLAGS.rabbit_retry_interval - LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' - ' unreachable: %(e)s. Trying again in %(fl_intv)d' - ' seconds.') % locals()) - self.failed_connection = True - if self.failed_connection: - LOG.error(_('Unable to connect to AMQP server ' - 'after %d tries. 
Shutting down.'), - FLAGS.rabbit_max_retries) - sys.exit(1) - - def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connection.""" - # TODO(vish): the logic for failed connections and logging should be - # refactored into some sort of connection manager object - try: - if self.failed_connection: - # NOTE(vish): connection is defined in the parent class, we can - # recreate it as long as we create the backend too - # pylint: disable=W0201 - self.connection = Connection.recreate() - self.backend = self.connection.create_backend() - self.declare() - return super(Consumer, self).fetch(no_ack, - auto_ack, - enable_callbacks) - if self.failed_connection: - LOG.error(_('Reconnected to queue')) - self.failed_connection = False - # NOTE(vish): This is catching all errors because we really don't - # want exceptions to be logged 10 times a second if some - # persistent failure occurs. - except Exception, e: # pylint: disable=W0703 - if not self.failed_connection: - LOG.exception(_('Failed to fetch message from queue: %s' % e)) - self.failed_connection = True - - def attach_to_eventlet(self): - """Only needed for unit tests!""" - timer = utils.LoopingCall(self.fetch, enable_callbacks=True) - timer.start(0.1) - return timer - - -class AdapterConsumer(Consumer): - """Calls methods on a proxy object based on method and args.""" - - def __init__(self, connection=None, topic='broadcast', proxy=None): - LOG.debug(_('Initing the Adapter Consumer for %s') % topic) - self.proxy = proxy - self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) - super(AdapterConsumer, self).__init__(connection=connection, - topic=topic) - self.register_callback(self.process_data) - - def process_data(self, message_data, message): - """Consumer callback to call a method on a proxy object. - - Parses the message for validity and fires off a thread to call the - proxy object method. - - Message data should be a dictionary with two keys: - method: string representing the method to call - args: dictionary of arg: value - - Example: {'method': 'echo', 'args': {'value': 42}} - - """ - LOG.debug(_('received %s') % message_data) - # This will be popped off in _unpack_context - msg_id = message_data.get('_msg_id', None) - ctxt = _unpack_context(message_data) - - method = message_data.get('method') - args = message_data.get('args', {}) - message.ack() - if not method: - # NOTE(vish): we may not want to ack here, but that means that bad - # messages stay in the queue indefinitely, so for now - # we just log the message and send an error string - # back to the caller - LOG.warn(_('no method for message: %s') % message_data) - if msg_id: - msg_reply(msg_id, - _('No method for message: %s') % message_data) - return - self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) - - @exception.wrap_exception() - def _process_data(self, msg_id, ctxt, method, args): - """Thread that maigcally looks for a method on the proxy - object and calls it. - """ - - node_func = getattr(self.proxy, str(method)) - node_args = dict((str(k), v) for k, v in args.iteritems()) - # NOTE(vish): magic is fun! - try: - rval = node_func(context=ctxt, **node_args) - if msg_id: - # Check if the result was a generator - if isinstance(rval, types.GeneratorType): - for x in rval: - msg_reply(msg_id, x, None) - else: - msg_reply(msg_id, rval, None) - - # This final None tells multicall that it is done. 
- msg_reply(msg_id, None, None) - elif isinstance(rval, types.GeneratorType): - # NOTE(vish): this iterates through the generator - list(rval) - except Exception as e: - logging.exception('Exception during message handling') - if msg_id: - msg_reply(msg_id, None, sys.exc_info()) - return - - -class TopicAdapterConsumer(AdapterConsumer): - """Consumes messages on a specific topic.""" - - exchange_type = 'topic' - - def __init__(self, connection=None, topic='broadcast', proxy=None): - self.queue = topic - self.routing_key = topic - self.exchange = FLAGS.control_exchange - self.durable = FLAGS.rabbit_durable_queues - super(TopicAdapterConsumer, self).__init__(connection=connection, - topic=topic, proxy=proxy) - - -class FanoutAdapterConsumer(AdapterConsumer): - """Consumes messages from a fanout exchange.""" - - exchange_type = 'fanout' - - def __init__(self, connection=None, topic='broadcast', proxy=None): - self.exchange = '%s_fanout' % topic - self.routing_key = topic - unique = uuid.uuid4().hex - self.queue = '%s_fanout_%s' % (topic, unique) - self.durable = False - # Fanout creates unique queue names, so we should auto-remove - # them when done, so they're not left around on restart. - # Also, we're the only one that should be consuming. exclusive - # implies auto_delete, so we'll just set that.. - self.exclusive = True - LOG.info(_('Created "%(exchange)s" fanout exchange ' - 'with "%(key)s" routing key'), - dict(exchange=self.exchange, key=self.routing_key)) - super(FanoutAdapterConsumer, self).__init__(connection=connection, - topic=topic, proxy=proxy) - - -class ConsumerSet(object): - """Groups consumers to listen on together on a single connection.""" - - def __init__(self, connection, consumer_list): - self.consumer_list = set(consumer_list) - self.consumer_set = None - self.enabled = True - self.init(connection) - - def init(self, conn): - if not conn: - conn = Connection.instance(new=True) - if self.consumer_set: - self.consumer_set.close() - self.consumer_set = messaging.ConsumerSet(conn) - for consumer in self.consumer_list: - consumer.connection = conn - # consumer.backend is set for us - self.consumer_set.add_consumer(consumer) - - def reconnect(self): - self.init(None) - - def wait(self, limit=None): - running = True - while running: - it = self.consumer_set.iterconsume(limit=limit) - if not it: - break - while True: - try: - it.next() - except StopIteration: - return - except greenlet.GreenletExit: - running = False - break - except Exception as e: - LOG.exception(_("Exception while processing consumer")) - self.reconnect() - # Break to outer loop - break - - def close(self): - self.consumer_set.close() - - -class Publisher(messaging.Publisher): - """Publisher base class.""" - pass - - -class TopicPublisher(Publisher): - """Publishes messages on a specific topic.""" - - exchange_type = 'topic' - - def __init__(self, connection=None, topic='broadcast'): - self.routing_key = topic - self.exchange = FLAGS.control_exchange - self.durable = FLAGS.rabbit_durable_queues - super(TopicPublisher, self).__init__(connection=connection) - - -class FanoutPublisher(Publisher): - """Publishes messages to a fanout exchange.""" - - exchange_type = 'fanout' - - def __init__(self, topic, connection=None): - self.exchange = '%s_fanout' % topic - self.queue = '%s_fanout' % topic - self.durable = False - self.auto_delete = True - LOG.info(_('Creating "%(exchange)s" fanout exchange'), - dict(exchange=self.exchange)) - super(FanoutPublisher, self).__init__(connection=connection) - - -class 
DirectConsumer(Consumer): - """Consumes messages directly on a channel specified by msg_id.""" - - exchange_type = 'direct' - - def __init__(self, connection=None, msg_id=None): - self.queue = msg_id - self.routing_key = msg_id - self.exchange = msg_id - self.durable = False - self.auto_delete = True - self.exclusive = True - super(DirectConsumer, self).__init__(connection=connection) - - -class DirectPublisher(Publisher): - """Publishes messages directly on a channel specified by msg_id.""" - - exchange_type = 'direct' - - def __init__(self, connection=None, msg_id=None): - self.routing_key = msg_id - self.exchange = msg_id - self.durable = False - self.auto_delete = True - super(DirectPublisher, self).__init__(connection=connection) - - -def msg_reply(msg_id, reply=None, failure=None): - """Sends a reply or an error on the channel signified by msg_id. - - Failure should be a sys.exc_info() tuple. - - """ - if failure: - message = str(failure[1]) - tb = traceback.format_exception(*failure) - LOG.error(_("Returning exception %s to caller"), message) - LOG.error(tb) - failure = (failure[0].__name__, str(failure[1]), tb) - - with ConnectionPool.item() as conn: - publisher = DirectPublisher(connection=conn, msg_id=msg_id) - try: - publisher.send({'result': reply, 'failure': failure}) - except TypeError: - publisher.send( - {'result': dict((k, repr(v)) - for k, v in reply.__dict__.iteritems()), - 'failure': failure}) - - publisher.close() - - -def _unpack_context(msg): - """Unpack context from msg.""" - context_dict = {} - for key in list(msg.keys()): - # NOTE(vish): Some versions of python don't like unicode keys - # in kwargs. - key = str(key) - if key.startswith('_context_'): - value = msg.pop(key) - context_dict[key[9:]] = value - context_dict['msg_id'] = msg.pop('_msg_id', None) - LOG.debug(_('unpacked context: %s'), context_dict) - return RpcContext.from_dict(context_dict) - - -def _pack_context(msg, context): - """Pack context into msg. - - Values for message keys need to be less than 255 chars, so we pull - context out into a bunch of separate keys. If we want to support - more arguments in rabbit messages, we may want to do the same - for args at some point. 
- - """ - context_d = dict([('_context_%s' % key, value) - for (key, value) in context.to_dict().iteritems()]) - msg.update(context_d) - - -class RpcContext(context.RequestContext): - def __init__(self, *args, **kwargs): - msg_id = kwargs.pop('msg_id', None) - self.msg_id = msg_id - super(RpcContext, self).__init__(*args, **kwargs) - - def reply(self, *args, **kwargs): - msg_reply(self.msg_id, *args, **kwargs) - - -def multicall(context, topic, msg): - """Make a call that returns multiple times.""" - LOG.debug(_('Making asynchronous call on %s ...'), topic) - msg_id = uuid.uuid4().hex - msg.update({'_msg_id': msg_id}) - LOG.debug(_('MSG_ID is %s') % (msg_id)) - _pack_context(msg, context) - - con_conn = ConnectionPool.get() - consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) - wait_msg = MulticallWaiter(consumer) - consumer.register_callback(wait_msg) - - publisher = TopicPublisher(connection=con_conn, topic=topic) - publisher.send(msg) - publisher.close() - - return wait_msg - - -class MulticallWaiter(object): - def __init__(self, consumer): - self._consumer = consumer - self._results = queue.Queue() - self._closed = False - - def close(self): - self._closed = True - self._consumer.close() - ConnectionPool.put(self._consumer.connection) - - def __call__(self, data, message): - """Acks message and sets result.""" - message.ack() - if data['failure']: - self._results.put(RemoteError(*data['failure'])) - else: - self._results.put(data['result']) - - def __iter__(self): - return self.wait() - - def wait(self): - while True: - rv = None - while rv is None and not self._closed: - try: - rv = self._consumer.fetch(enable_callbacks=True) - except Exception: - self.close() - raise - time.sleep(0.01) - - result = self._results.get() - if isinstance(result, Exception): - self.close() - raise result - if result == None: - self.close() - raise StopIteration - yield result - - -def call(context, topic, msg): - """Sends a message on a topic and wait for a response.""" - rv = multicall(context, topic, msg) - # NOTE(vish): return the last result from the multicall - rv = list(rv) - if not rv: - return - return rv[-1] - - -def cast(context, topic, msg): - """Sends a message on a topic without waiting for a response.""" - LOG.debug(_('Making asynchronous cast on %s...'), topic) - _pack_context(msg, context) - with ConnectionPool.item() as conn: - publisher = TopicPublisher(connection=conn, topic=topic) - publisher.send(msg) - publisher.close() - - -def fanout_cast(context, topic, msg): - """Sends a message on a fanout exchange without waiting for a response.""" - LOG.debug(_('Making asynchronous fanout cast...')) - _pack_context(msg, context) - with ConnectionPool.item() as conn: - publisher = FanoutPublisher(topic, connection=conn) - publisher.send(msg) - publisher.close() - - -def generic_response(message_data, message): - """Logs a result and exits.""" - LOG.debug(_('response %s'), message_data) - message.ack() - sys.exit(0) - - -def send_message(topic, message, wait=True): - """Sends a message for testing.""" - msg_id = uuid.uuid4().hex - message.update({'_msg_id': msg_id}) - LOG.debug(_('topic is %s'), topic) - LOG.debug(_('message %s'), message) - - if wait: - consumer = messaging.Consumer(connection=Connection.instance(), - queue=msg_id, - exchange=msg_id, - auto_delete=True, - exchange_type='direct', - routing_key=msg_id) - consumer.register_callback(generic_response) - - publisher = messaging.Publisher(connection=Connection.instance(), - exchange=FLAGS.control_exchange, - 
durable=FLAGS.rabbit_durable_queues, - exchange_type='topic', - routing_key=topic) - publisher.send(message) - publisher.close() - - if wait: - consumer.wait() - consumer.close() - - -if __name__ == '__main__': - # You can send messages from the command line using - # topic and a json string representing a dictionary - # for the method - send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py new file mode 100644 index 000000000..529f98722 --- /dev/null +++ b/nova/rpc/impl_carrot.py @@ -0,0 +1,607 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""AMQP-based RPC. + +Queues have consumers and publishers. + +No fan-out support yet. + +""" + +import json +import sys +import time +import traceback +import types +import uuid + +from carrot import connection as carrot_connection +from carrot import messaging +from eventlet import greenpool +from eventlet import pools +from eventlet import queue +import greenlet + +from nova import context +from nova import exception +from nova import fakerabbit +from nova import flags +from nova import log as logging +from nova import utils +from nova.rpc.common import RemoteError, LOG + + +FLAGS = flags.FLAGS +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') + + +class Connection(carrot_connection.BrokerConnection): + """Connection instance object.""" + + @classmethod + def instance(cls, new=True): + """Returns the instance.""" + if new or not hasattr(cls, '_instance'): + params = dict(hostname=FLAGS.rabbit_host, + port=FLAGS.rabbit_port, + ssl=FLAGS.rabbit_use_ssl, + userid=FLAGS.rabbit_userid, + password=FLAGS.rabbit_password, + virtual_host=FLAGS.rabbit_virtual_host) + + if FLAGS.fake_rabbit: + params['backend_cls'] = fakerabbit.Backend + + # NOTE(vish): magic is fun! + # pylint: disable=W0142 + if new: + return cls(**params) + else: + cls._instance = cls(**params) + return cls._instance + + @classmethod + def recreate(cls): + """Recreates the connection instance. + + This is necessary to recover from some network errors/disconnects. + + """ + try: + del cls._instance + except AttributeError, e: + # The _instance stuff is for testing purposes. Usually we don't use + # it. So don't freak out if it doesn't exist. + pass + return cls.instance() + + +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug('Creating new connection') + return Connection.instance(new=True) + +# Create a ConnectionPool to use for RPC calls. 
We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) + + +class Consumer(messaging.Consumer): + """Consumer base class. + + Contains methods for connecting the fetch method to async loops. + + """ + + def __init__(self, *args, **kwargs): + for i in xrange(FLAGS.rabbit_max_retries): + if i > 0: + time.sleep(FLAGS.rabbit_retry_interval) + try: + super(Consumer, self).__init__(*args, **kwargs) + self.failed_connection = False + break + except Exception as e: # Catching all because carrot sucks + fl_host = FLAGS.rabbit_host + fl_port = FLAGS.rabbit_port + fl_intv = FLAGS.rabbit_retry_interval + LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' + ' unreachable: %(e)s. Trying again in %(fl_intv)d' + ' seconds.') % locals()) + self.failed_connection = True + if self.failed_connection: + LOG.error(_('Unable to connect to AMQP server ' + 'after %d tries. Shutting down.'), + FLAGS.rabbit_max_retries) + sys.exit(1) + + def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): + """Wraps the parent fetch with some logic for failed connection.""" + # TODO(vish): the logic for failed connections and logging should be + # refactored into some sort of connection manager object + try: + if self.failed_connection: + # NOTE(vish): connection is defined in the parent class, we can + # recreate it as long as we create the backend too + # pylint: disable=W0201 + self.connection = Connection.recreate() + self.backend = self.connection.create_backend() + self.declare() + return super(Consumer, self).fetch(no_ack, + auto_ack, + enable_callbacks) + if self.failed_connection: + LOG.error(_('Reconnected to queue')) + self.failed_connection = False + # NOTE(vish): This is catching all errors because we really don't + # want exceptions to be logged 10 times a second if some + # persistent failure occurs. + except Exception, e: # pylint: disable=W0703 + if not self.failed_connection: + LOG.exception(_('Failed to fetch message from queue: %s' % e)) + self.failed_connection = True + + def attach_to_eventlet(self): + """Only needed for unit tests!""" + timer = utils.LoopingCall(self.fetch, enable_callbacks=True) + timer.start(0.1) + return timer + + +class AdapterConsumer(Consumer): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, connection=None, topic='broadcast', proxy=None): + LOG.debug(_('Initing the Adapter Consumer for %s') % topic) + self.proxy = proxy + self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) + super(AdapterConsumer, self).__init__(connection=connection, + topic=topic) + self.register_callback(self.process_data) + + def process_data(self, message_data, message): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. 
+ + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + LOG.debug(_('received %s') % message_data) + # This will be popped off in _unpack_context + msg_id = message_data.get('_msg_id', None) + ctxt = _unpack_context(message_data) + + method = message_data.get('method') + args = message_data.get('args', {}) + message.ack() + if not method: + # NOTE(vish): we may not want to ack here, but that means that bad + # messages stay in the queue indefinitely, so for now + # we just log the message and send an error string + # back to the caller + LOG.warn(_('no method for message: %s') % message_data) + if msg_id: + msg_reply(msg_id, + _('No method for message: %s') % message_data) + return + self.pool.spawn_n(self._process_data, msg_id, ctxt, method, args) + + @exception.wrap_exception() + def _process_data(self, msg_id, ctxt, method, args): + """Thread that maigcally looks for a method on the proxy + object and calls it. + """ + + node_func = getattr(self.proxy, str(method)) + node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! + try: + rval = node_func(context=ctxt, **node_args) + if msg_id: + # Check if the result was a generator + if isinstance(rval, types.GeneratorType): + for x in rval: + msg_reply(msg_id, x, None) + else: + msg_reply(msg_id, rval, None) + + # This final None tells multicall that it is done. + msg_reply(msg_id, None, None) + elif isinstance(rval, types.GeneratorType): + # NOTE(vish): this iterates through the generator + list(rval) + except Exception as e: + logging.exception('Exception during message handling') + if msg_id: + msg_reply(msg_id, None, sys.exc_info()) + return + + +class TopicAdapterConsumer(AdapterConsumer): + """Consumes messages on a specific topic.""" + + exchange_type = 'topic' + + def __init__(self, connection=None, topic='broadcast', proxy=None): + self.queue = topic + self.routing_key = topic + self.exchange = FLAGS.control_exchange + self.durable = FLAGS.rabbit_durable_queues + super(TopicAdapterConsumer, self).__init__(connection=connection, + topic=topic, proxy=proxy) + + +class FanoutAdapterConsumer(AdapterConsumer): + """Consumes messages from a fanout exchange.""" + + exchange_type = 'fanout' + + def __init__(self, connection=None, topic='broadcast', proxy=None): + self.exchange = '%s_fanout' % topic + self.routing_key = topic + unique = uuid.uuid4().hex + self.queue = '%s_fanout_%s' % (topic, unique) + self.durable = False + # Fanout creates unique queue names, so we should auto-remove + # them when done, so they're not left around on restart. + # Also, we're the only one that should be consuming. exclusive + # implies auto_delete, so we'll just set that.. 
+ self.exclusive = True + LOG.info(_('Created "%(exchange)s" fanout exchange ' + 'with "%(key)s" routing key'), + dict(exchange=self.exchange, key=self.routing_key)) + super(FanoutAdapterConsumer, self).__init__(connection=connection, + topic=topic, proxy=proxy) + + +class ConsumerSet(object): + """Groups consumers to listen on together on a single connection.""" + + def __init__(self, connection, consumer_list): + self.consumer_list = set(consumer_list) + self.consumer_set = None + self.enabled = True + self.init(connection) + + def init(self, conn): + if not conn: + conn = Connection.instance(new=True) + if self.consumer_set: + self.consumer_set.close() + self.consumer_set = messaging.ConsumerSet(conn) + for consumer in self.consumer_list: + consumer.connection = conn + # consumer.backend is set for us + self.consumer_set.add_consumer(consumer) + + def reconnect(self): + self.init(None) + + def wait(self, limit=None): + running = True + while running: + it = self.consumer_set.iterconsume(limit=limit) + if not it: + break + while True: + try: + it.next() + except StopIteration: + return + except greenlet.GreenletExit: + running = False + break + except Exception as e: + LOG.exception(_("Exception while processing consumer")) + self.reconnect() + # Break to outer loop + break + + def close(self): + self.consumer_set.close() + + +class Publisher(messaging.Publisher): + """Publisher base class.""" + pass + + +class TopicPublisher(Publisher): + """Publishes messages on a specific topic.""" + + exchange_type = 'topic' + + def __init__(self, connection=None, topic='broadcast'): + self.routing_key = topic + self.exchange = FLAGS.control_exchange + self.durable = FLAGS.rabbit_durable_queues + super(TopicPublisher, self).__init__(connection=connection) + + +class FanoutPublisher(Publisher): + """Publishes messages to a fanout exchange.""" + + exchange_type = 'fanout' + + def __init__(self, topic, connection=None): + self.exchange = '%s_fanout' % topic + self.queue = '%s_fanout' % topic + self.durable = False + self.auto_delete = True + LOG.info(_('Creating "%(exchange)s" fanout exchange'), + dict(exchange=self.exchange)) + super(FanoutPublisher, self).__init__(connection=connection) + + +class DirectConsumer(Consumer): + """Consumes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' + + def __init__(self, connection=None, msg_id=None): + self.queue = msg_id + self.routing_key = msg_id + self.exchange = msg_id + self.durable = False + self.auto_delete = True + self.exclusive = True + super(DirectConsumer, self).__init__(connection=connection) + + +class DirectPublisher(Publisher): + """Publishes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' + + def __init__(self, connection=None, msg_id=None): + self.routing_key = msg_id + self.exchange = msg_id + self.durable = False + self.auto_delete = True + super(DirectPublisher, self).__init__(connection=connection) + + +def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. 
+ + """ + if failure: + message = str(failure[1]) + tb = traceback.format_exception(*failure) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) + failure = (failure[0].__name__, str(failure[1]), tb) + + with ConnectionPool.item() as conn: + publisher = DirectPublisher(connection=conn, msg_id=msg_id) + try: + publisher.send({'result': reply, 'failure': failure}) + except TypeError: + publisher.send( + {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure}) + + publisher.close() + + +def _unpack_context(msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs. + key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + LOG.debug(_('unpacked context: %s'), context_dict) + return RpcContext.from_dict(context_dict) + + +def _pack_context(msg, context): + """Pack context into msg. + + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. + + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + msg_reply(self.msg_id, *args, **kwargs) + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + _pack_context(msg, context) + + con_conn = ConnectionPool.get() + consumer = DirectConsumer(connection=con_conn, msg_id=msg_id) + wait_msg = MulticallWaiter(consumer) + consumer.register_callback(wait_msg) + + publisher = TopicPublisher(connection=con_conn, topic=topic) + publisher.send(msg) + publisher.close() + + return wait_msg + + +class MulticallWaiter(object): + def __init__(self, consumer): + self._consumer = consumer + self._results = queue.Queue() + self._closed = False + + def close(self): + self._closed = True + self._consumer.close() + ConnectionPool.put(self._consumer.connection) + + def __call__(self, data, message): + """Acks message and sets result.""" + message.ack() + if data['failure']: + self._results.put(RemoteError(*data['failure'])) + else: + self._results.put(data['result']) + + def __iter__(self): + return self.wait() + + def wait(self): + while True: + rv = None + while rv is None and not self._closed: + try: + rv = self._consumer.fetch(enable_callbacks=True) + except Exception: + self.close() + raise + time.sleep(0.01) + + result = self._results.get() + if isinstance(result, Exception): + self.close() + raise result + if result == None: + self.close() + raise StopIteration + yield result + + +def create_consumer(conn, topic, proxy, fanout=False): + """Create a consumer that calls methods in the proxy""" + if fanout: + return FanoutAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + else: + return TopicAdapterConsumer( + connection=conn, + topic=topic, + proxy=proxy) + + +def call(context, topic, msg): + """Sends a message on 
a topic and wait for a response.""" + rv = multicall(context, topic, msg) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + _pack_context(msg, context) + with ConnectionPool.item() as conn: + publisher = TopicPublisher(connection=conn, topic=topic) + publisher.send(msg) + publisher.close() + + +def fanout_cast(context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + _pack_context(msg, context) + with ConnectionPool.item() as conn: + publisher = FanoutPublisher(topic, connection=conn) + publisher.send(msg) + publisher.close() + + +def generic_response(message_data, message): + """Logs a result and exits.""" + LOG.debug(_('response %s'), message_data) + message.ack() + sys.exit(0) + + +def send_message(topic, message, wait=True): + """Sends a message for testing.""" + msg_id = uuid.uuid4().hex + message.update({'_msg_id': msg_id}) + LOG.debug(_('topic is %s'), topic) + LOG.debug(_('message %s'), message) + + if wait: + consumer = messaging.Consumer(connection=Connection.instance(), + queue=msg_id, + exchange=msg_id, + auto_delete=True, + exchange_type='direct', + routing_key=msg_id) + consumer.register_callback(generic_response) + + publisher = messaging.Publisher(connection=Connection.instance(), + exchange=FLAGS.control_exchange, + durable=FLAGS.rabbit_durable_queues, + exchange_type='topic', + routing_key=topic) + publisher.send(message) + publisher.close() + + if wait: + consumer.wait() + consumer.close() + + +if __name__ == '__main__': + # You can send messages from the command line using + # topic and a json string representing a dictionary + # for the method + send_message(sys.argv[1], json.loads(sys.argv[2])) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py new file mode 100644 index 000000000..e609227c9 --- /dev/null +++ b/nova/rpc/impl_kombu.py @@ -0,0 +1,426 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from nova import flags +from nova import log as logging + +import kombu +import kombu.entity +import kombu.messaging +import kombu.connection +import itertools +import sys +import time +import uuid + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.rpc') + + +class QueueBase(object): + """Queue base class.""" + + def __init__(self, channel, callback, tag, **kwargs): + """Init the queue. + + 'channel' is the amqp channel to use + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + queue name, exchange name, and other kombu options are + passed in here as a dictionary. 
+ """ + self.callback = callback + self.tag = str(tag) + self.kwargs = kwargs + self.queue = None + self.reconnect(channel) + + def reconnect(self, channel): + """Re-create the queue after a rabbit reconnect""" + self.channel = channel + self.kwargs['channel'] = channel + self.queue = kombu.entity.Queue(**self.kwargs) + self.queue.declare() + + def consume(self, *args, **kwargs): + """Consume from this queue. + If a callback is specified in kwargs, use that. Otherwise, + use the callback passed during __init__() + + The callback will be called if a message was read off of the + queue. + + If kwargs['nowait'] is True, then this call will block until + a message is read. + + Messages will automatically be acked if the callback doesn't + raise an exception + """ + + options = {'consumer_tag': self.tag} + options['nowait'] = kwargs.get('nowait', False) + callback = kwargs.get('callback', self.callback) + if not callback: + raise ValueError("No callback defined") + + def _callback(raw_message): + message = self.channel.message_to_python(raw_message) + callback(message.payload) + message.ack() + + self.queue.consume(*args, callback=_callback, **options) + + def cancel(self): + """Cancel the consuming from the queue, if it has started""" + try: + self.queue.cancel(self.tag) + except KeyError, e: + # NOTE(comstud): Kludge to get around a amqplib bug + if str(e) != "u'%s'" % self.tag: + raise + self.queue = None + + +class DirectQueue(QueueBase): + """Queue/consumer class for 'direct'""" + + def __init__(self, channel, msg_id, callback, tag, **kwargs): + """Init a 'direct' queue. + + 'channel' is the amqp channel to use + 'msg_id' is the msg_id to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=msg_id, + type='direct', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(DirectQueue, self).__init__( + channel, + callback, + tag, + name=msg_id, + exchange=exchange, + routing_key=msg_id, + **options) + + +class TopicQueue(QueueBase): + """Queue/consumer class for 'topic'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'topic' queue. + + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + # Default options + options = {'durable': FLAGS.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=FLAGS.control_exchange, + type='topic', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(TopicQueue, self).__init__( + channel, + callback, + tag, + name=topic, + exchange=exchange, + routing_key=topic, + **options) + + +class FanoutQueue(QueueBase): + """Queue/consumer class for 'fanout'""" + + def __init__(self, channel, topic, callback, tag, **kwargs): + """Init a 'fanout' queue. 
+ + 'channel' is the amqp channel to use + 'topic' is the topic to listen on + 'callback' is the callback to call when messages are received + 'tag' is a unique ID for the consumer on the channel + + Other kombu options may be passed + """ + unique = uuid.uuid4().hex + exchange_name = '%s_fanout' % topic + queue_name = '%s_fanout_%s' % (topic, unique) + + # Default options + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + exchange = kombu.entity.Exchange( + name=exchange_name, + type='fanout', + durable=options['durable'], + auto_delete=options['auto_delete']) + super(FanoutQueue, self).__init__( + channel, + callback, + tag, + name=queue_name, + exchange=exchange, + routing_key=topic, + **options) + + +class Publisher(object): + """Base Publisher class""" + + def __init__(self, channel, exchange_name, routing_key, **kwargs): + """Init the Publisher class with the exchange_name, routing_key, + and other options + """ + self.exchange_name = exchange_name + self.routing_key = routing_key + self.kwargs = kwargs + self.reconnect(channel) + + def reconnect(self, channel): + """Re-establish the Producer after a rabbit reconnection""" + self.exchange = kombu.entity.Exchange(name=self.exchange_name, + **self.kwargs) + self.producer = kombu.messaging.Producer(exchange=self.exchange, + channel=channel, routing_key=self.routing_key) + + def send(self, msg): + """Send a message""" + self.producer.publish(msg) + + +class DirectPublisher(Publisher): + """Publisher class for 'direct'""" + def __init__(self, channel, msg_id, **kwargs): + """init a 'direct' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(DirectPublisher, self).__init__(channel, + msg_id, + msg_id, + type='direct', + **options) + + +class TopicPublisher(Publisher): + """Publisher class for 'topic'""" + def __init__(self, channel, topic, **kwargs): + """init a 'topic' publisher. + + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': FLAGS.rabbit_durable_queues, + 'auto_delete': False, + 'exclusive': False} + options.update(kwargs) + super(TopicPublisher, self).__init__(channel, + FLAGS.control_exchange, + topic, + type='topic', + **options) + + +class FanoutPublisher(Publisher): + """Publisher class for 'fanout'""" + def __init__(self, channel, topic, **kwargs): + """init a 'fanout' publisher. 
+ + Kombu options may be passed as keyword args to override defaults + """ + options = {'durable': False, + 'auto_delete': True, + 'exclusive': True} + options.update(kwargs) + super(FanoutPublisher, self).__init__(channel, + '%s_fanout' % topic, + None, + type='fanout', + **options) + + +class Connection(object): + """Connection instance object.""" + + def __init__(self): + self.queues = [] + self.max_retries = FLAGS.rabbit_max_retries + self.interval_start = FLAGS.rabbit_retry_interval + self.interval_stepping = 0 + self.interval_max = FLAGS.rabbit_retry_interval + + self.params = dict(hostname=FLAGS.rabbit_host, + port=FLAGS.rabbit_port, + userid=FLAGS.rabbit_userid, + password=FLAGS.rabbit_password, + virtual_host=FLAGS.rabbit_virtual_host) + if FLAGS.fake_rabbit: + self.params['transport'] = 'memory' + self.connection = None + self.reconnect() + + def reconnect(self): + """Handles reconnecting and re-estblishing queues""" + if self.connection: + try: + self.connection.close() + except self.connection.connection_errors: + pass + time.sleep(1) + self.connection = kombu.connection.Connection(**self.params) + self.queue_num = itertools.count(1) + + try: + self.connection.ensure_connection(errback=self.connect_error, + max_retries=self.max_retries, + interval_start=self.interval_start, + interval_step=self.interval_stepping, + interval_max=self.interval_max) + except self.connection.connection_errors, e: + err_str = str(e) + max_retries = FLAGS.rabbit_max_retries + LOG.error(_('Unable to connect to AMQP server ' + 'after %(max_retries)d tries: %(err_str)s') % locals()) + # NOTE(comstud): Original carrot code exits after so many + # attempts, but I wonder if we should re-try indefinitely + sys.exit(1) + LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % + self.params)) + self.channel = self.connection.channel() + for consumer in self.queues: + consumer.reconnect(self.channel) + if self.queues: + LOG.debug(_("Re-established AMQP queues")) + + def get_channel(self): + """Convenience call for bin/clear_rabbit_queues""" + return self.channel + + def connect_error(self, exc, interval): + """Callback when there are connection re-tries by kombu""" + info = self.params.copy() + info['intv'] = interval + info['e'] = exc + LOG.error(_('AMQP server on %(hostname)s:%(port)d is' + ' unreachable: %(e)s. 
Trying again in %(intv)d' + ' seconds.') % info) + + def close(self): + """Close/release this connection""" + self.connection.release() + self.connection = None + + def reset(self): + """Reset a connection so it can be used again""" + self.channel.close() + self.channel = self.connection.channel() + self.queues = [] + + def create_queue(self, queue_cls, topic, callback): + """Create a queue using the class that was passed in and + add it to our list of queues used for consuming + """ + self.queues.append(queue_cls(self.channel, topic, callback, + self.queue_num.next())) + + def consume(self, limit=None): + """Consume from all queues""" + while True: + try: + queues_head = self.queues[:-1] + queues_tail = self.queues[-1] + for queue in queues_head: + queue.consume(nowait=True) + queues_tail.consume(nowait=False) + + for iteration in itertools.count(0): + if limit and iteration >= limit: + raise StopIteration + yield self.connection.drain_events() + except self.connection.connection_errors, e: + LOG.exception(_('Failed to consume message from queue: ' + '%s' % str(e))) + self.reconnect() + + def publisher_send(self, cls, topic, msg): + """Send to a publisher based on the publisher class""" + while True: + publisher = None + try: + publisher = cls(self.channel, topic) + publisher.send(msg) + return + except self.connection.connection_errors, e: + LOG.exception(_('Failed to publish message %s' % str(e))) + try: + self.reconnect() + if publisher: + publisher.reconnect(self.channel) + except self.connection.connection_errors, e: + pass + + def direct_consumer(self, topic, callback): + """Create a 'direct' queue. + In nova's use, this is generally a msg_id queue used for + responses for call/multicall + """ + self.create_queue(DirectQueue, topic, callback) + + def topic_consumer(self, topic, callback=None): + """Create a 'topic' queue.""" + self.create_queue(TopicQueue, topic, callback) + + def fanout_consumer(self, topic, callback): + """Create a 'fanout' queue""" + self.create_queue(FanoutQueue, topic, callback) + + def direct_send(self, msg_id, msg): + """Send a 'direct' message""" + self.publisher_send(DirectPublisher, msg_id, msg) + + def topic_send(self, topic, msg): + """Send a 'topic' message""" + self.publisher_send(TopicPublisher, topic, msg) + + def fanout_send(self, topic, msg): + """Send a 'fanout' message""" + self.publisher_send(FanoutPublisher, topic, msg) -- cgit From 9cb46c48682657039173447f9689e15ed3ce15af Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 15:59:15 -0700 Subject: more work done to restore original rpc interfaces. 
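This round restores the module-level multicall/call/cast helpers on top of
the pooled kombu connection. A minimal caller-side sketch of the restored
interface, assuming nova.rpc forwards these helpers to the configured
backend, a reachable broker (or fake_rabbit=True), and a consumer already
serving the topic; the 'compute' topic and method names are placeholders:

    from nova import context
    from nova import rpc

    ctxt = context.get_admin_context()

    # Fire-and-forget: no reply is expected or waited for.
    # ('refresh_cache' is a placeholder method name.)
    rpc.cast(ctxt, 'compute', {'method': 'refresh_cache', 'args': {}})

    # Blocking call: returns the last reply from the remote method.
    result = rpc.call(ctxt, 'compute',
                      {'method': 'echo', 'args': {'value': 42}})

    # Streaming: multicall yields each reply until the terminating
    # None sent by the remote side arrives.
    for value in rpc.multicall(ctxt, 'compute',
                               {'method': 'echo', 'args': {'value': 42}}):
        print value
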
--- nova/rpc/FIXME | 2 + nova/rpc/__init__.py | 3 +- nova/rpc/impl_kombu.py | 307 ++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 306 insertions(+), 6 deletions(-) create mode 100644 nova/rpc/FIXME diff --git a/nova/rpc/FIXME b/nova/rpc/FIXME new file mode 100644 index 000000000..704081802 --- /dev/null +++ b/nova/rpc/FIXME @@ -0,0 +1,2 @@ +Move some code duplication between carrot/kombu into common.py +The other FIXMEs in __init__.py and impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index f102cf0fa..9371c2ab3 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -27,7 +27,7 @@ flags.DEFINE_string('rpc_backend', "The messaging module to use, defaults to carrot.") impl_table = {'kombu': 'nova.rpc.impl_kombu', - 'amqp': 'nova.rpc.impl_kombu'} + 'amqp': 'nova.rpc.impl_kombu', 'carrot': 'nova.rpc.impl_carrot'} @@ -46,6 +46,7 @@ def create_consumer(conn, topic, proxy, fanout=False): def create_consumer_set(conn, consumers): + # FIXME(comstud): replace however necessary return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index e609227c9..a222bb885 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -30,6 +30,11 @@ import uuid FLAGS = flags.FLAGS LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') + class QueueBase(object): """Queue base class.""" @@ -298,6 +303,16 @@ class Connection(object): self.connection = None self.reconnect() + @classmethod + def instance(cls, new=True): + """Returns the instance.""" + if new or not hasattr(cls, '_instance'): + if new: + return cls() + else: + cls._instance = cls() + return cls._instance + def reconnect(self): """Handles reconnecting and re-estblishing queues""" if self.connection: @@ -359,8 +374,10 @@ class Connection(object): """Create a queue using the class that was passed in and add it to our list of queues used for consuming """ - self.queues.append(queue_cls(self.channel, topic, callback, - self.queue_num.next())) + queue = queue_cls(self.channel, topic, callback, + self.queue_num.next()) + self.queues.append(queue) + return queue def consume(self, limit=None): """Consume from all queues""" @@ -403,15 +420,15 @@ class Connection(object): In nova's use, this is generally a msg_id queue used for responses for call/multicall """ - self.create_queue(DirectQueue, topic, callback) + return self.create_queue(DirectQueue, topic, callback) def topic_consumer(self, topic, callback=None): """Create a 'topic' queue.""" - self.create_queue(TopicQueue, topic, callback) + return self.create_queue(TopicQueue, topic, callback) def fanout_consumer(self, topic, callback): """Create a 'fanout' queue""" - self.create_queue(FanoutQueue, topic, callback) + return self.create_queue(FanoutQueue, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" @@ -424,3 +441,283 @@ class Connection(object): def fanout_send(self, topic, msg): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) + + +class Pool(pools.Pool): + """Class that implements a Pool of Connections.""" + + # TODO(comstud): Timeout connections not used in a while + def create(self): + LOG.debug('Creating new connection') + return RPCIMPL.Connection() + +# Create a ConnectionPool to use for RPC calls. 
We'll order the +# pool as a stack (LIFO), so that we can potentially loop through and +# timeout old unused connections at some point +ConnectionPool = Pool( + max_size=FLAGS.rpc_conn_pool_size, + order_as_stack=True) + + +class ConnectionContext(object): + def __init__(self, pooled=True): + self.connection = None + if pooled: + self.connection = ConnectionPool.get() + else: + self.connection = RPCIMPL.Connection() + self.pooled = pooled + + def __enter__(self): + return self + + def _done(self): + if self.connection: + if self.pooled: + # Reset the connection so it's ready for the next caller + # to grab from the pool + self.connection.reset() + ConnectionPool.put(self.connection) + else: + try: + self.connection.close() + except Exception: + # There's apparently a bug in kombu 'memory' transport + # which causes an assert failure. + # But, we probably want to ignore all exceptions when + # trying to close a connection, anyway... + pass + self.connection = None + + def __exit__(self, t, v, tb): + """end if 'with' statement. We're done here.""" + self._done() + + def __del__(self): + """Put Connection back into the pool if this ConnectionContext + is being deleted + """ + self._done() + + def close(self): + self._done() + + def __getattr__(self, key): + if self.connection: + return getattr(self.connection, key) + else: + raise exception.InvalidRPCConnectionReuse() + + +class ProxyCallback(object): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, proxy): + self.proxy = proxy + self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) + + def __call__(self, message_data): + """Consumer callback to call a method on a proxy object. + + Parses the message for validity and fires off a thread to call the + proxy object method. + + Message data should be a dictionary with two keys: + method: string representing the method to call + args: dictionary of arg: value + + Example: {'method': 'echo', 'args': {'value': 42}} + + """ + LOG.debug(_('received %s') % message_data) + ctxt = _unpack_context(message_data) + method = message_data.get('method') + args = message_data.get('args', {}) + if not method: + LOG.warn(_('no method for message: %s') % message_data) + ctxt.reply(_('No method for message: %s') % message_data) + return + self.pool.spawn_n(self._process_data, ctxt, method, args) + + @exception.wrap_exception() + def _process_data(self, ctxt, method, args): + """Thread that maigcally looks for a method on the proxy + object and calls it. + """ + + node_func = getattr(self.proxy, str(method)) + node_args = dict((str(k), v) for k, v in args.iteritems()) + # NOTE(vish): magic is fun! + try: + rval = node_func(context=ctxt, **node_args) + # Check if the result was a generator + if isinstance(rval, types.GeneratorType): + for x in rval: + ctxt.reply(x, None) + else: + ctxt.reply(rval, None) + # This final None tells multicall that it is done. + ctxt.reply(None, None) + except Exception as e: + logging.exception('Exception during message handling') + ctxt.reply(None, sys.exc_info()) + return + + +def _unpack_context(msg): + """Unpack context from msg.""" + context_dict = {} + for key in list(msg.keys()): + # NOTE(vish): Some versions of python don't like unicode keys + # in kwargs. 
+ key = str(key) + if key.startswith('_context_'): + value = msg.pop(key) + context_dict[key[9:]] = value + context_dict['msg_id'] = msg.pop('_msg_id', None) + LOG.debug(_('unpacked context: %s'), context_dict) + return RpcContext.from_dict(context_dict) + + +def _pack_context(msg, context): + """Pack context into msg. + + Values for message keys need to be less than 255 chars, so we pull + context out into a bunch of separate keys. If we want to support + more arguments in rabbit messages, we may want to do the same + for args at some point. + + """ + context_d = dict([('_context_%s' % key, value) + for (key, value) in context.to_dict().iteritems()]) + msg.update(context_d) + + +class RpcContext(context.RequestContext): + def __init__(self, *args, **kwargs): + msg_id = kwargs.pop('msg_id', None) + self.msg_id = msg_id + super(RpcContext, self).__init__(*args, **kwargs) + + def reply(self, *args, **kwargs): + if self.msg_id: + msg_reply(self.msg_id, *args, **kwargs) + + +class MulticallWaiter(object): + def __init__(self, connection): + self._connection = connection + self._iterator = connection.consume() + self._result = None + self._done = False + + def done(self): + self._done = True + self._connection = None + + def __call__(self, data): + """The consume() callback will call this. Store the result.""" + if data['failure']: + self._result = RemoteError(*data['failure']) + else: + self._result = data['result'] + + def __iter__(self): + if self._done: + raise StopIteration + while True: + self._iterator.next() + result = self._result + if isinstance(result, Exception): + self.done() + raise result + if result == None: + self.done() + raise StopIteration + yield result + + +def create_consumer(conn, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + if fanout: + return conn.fanout_consumer(topic, ProxyCallback(proxy)) + else: + return conn.topic_consumer(topic, ProxyCallback(proxy)) + + +def create_consumer_set(conn, consumers): + # FIXME(comstud): Replace this however necessary + # Returns an object that you can call .wait() on to consume + # all queues? + # Needs to have a .close() which will stop consuming? + # Needs to also have an attach_to_eventlet method for tests? + raise NotImplemented + + +def multicall(context, topic, msg): + """Make a call that returns multiple times.""" + # Can't use 'with' for multicall, as it returns an iterator + # that will continue to use the connection. 
When it's done, + # connection.close() will get called which will put it back into + # the pool + LOG.debug(_('Making asynchronous call on %s ...'), topic) + msg_id = uuid.uuid4().hex + msg.update({'_msg_id': msg_id}) + LOG.debug(_('MSG_ID is %s') % (msg_id)) + _pack_context(msg, context) + + conn = ConnectionContext() + wait_msg = MulticallWaiter(conn) + conn.direct_consumer(msg_id, wait_msg) + conn.topic_send(topic, msg) + + return wait_msg + + +def call(context, topic, msg): + """Sends a message on a topic and wait for a response.""" + rv = multicall(context, topic, msg) + # NOTE(vish): return the last result from the multicall + rv = list(rv) + if not rv: + return + return rv[-1] + + +def cast(context, topic, msg): + """Sends a message on a topic without waiting for a response.""" + LOG.debug(_('Making asynchronous cast on %s...'), topic) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.topic_send(topic, msg) + + +def fanout_cast(context, topic, msg): + """Sends a message on a fanout exchange without waiting for a response.""" + LOG.debug(_('Making asynchronous fanout cast...')) + _pack_context(msg, context) + with ConnectionContext() as conn: + conn.fanout_send(topic, msg) + + +def msg_reply(msg_id, reply=None, failure=None): + """Sends a reply or an error on the channel signified by msg_id. + + Failure should be a sys.exc_info() tuple. + + """ + with ConnectionContext() as conn: + if failure: + message = str(failure[1]) + tb = traceback.format_exception(*failure) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) + failure = (failure[0].__name__, str(failure[1]), tb) + + try: + msg = {'result': reply, 'failure': failure} + except TypeError: + msg = {'result': dict((k, repr(v)) + for k, v in reply.__dict__.iteritems()), + 'failure': failure} + conn.direct_send(msg_id, msg) -- cgit From 1c5031ca6f89de4ac8a7dac271aff9942fc9602a Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 16:00:50 -0700 Subject: more fixes --- nova/rpc/impl_kombu.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index a222bb885..cfef421c6 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -15,7 +15,7 @@ # under the License. from nova import flags -from nova import log as logging +from nova.rpc.common import RemoteError, LOG import kombu import kombu.entity @@ -28,7 +28,6 @@ import uuid FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.rpc') flags.DEFINE_integer('rpc_conn_pool_size', 30, 'Size of RPC connection pool') @@ -559,7 +558,7 @@ class ProxyCallback(object): # This final None tells multicall that it is done. 
ctxt.reply(None, None) except Exception as e: - logging.exception('Exception during message handling') + LOG.exception('Exception during message handling') ctxt.reply(None, sys.exc_info()) return -- cgit From 3c835ebe4bb6e0aa61da2a2e44a6b19bc92fc72a Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Fri, 26 Aug 2011 16:04:34 -0700 Subject: flag for kombu connection backoff on retries --- nova/flags.py | 1 + nova/rpc/impl_kombu.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index 95000df1b..ac70386e7 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -303,6 +303,7 @@ DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') +DEFINE_integer('rabbit_interval_stepping', 2, 'rabbit connection retry backoff in seconds') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index cfef421c6..65199808e 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -289,7 +289,7 @@ class Connection(object): self.queues = [] self.max_retries = FLAGS.rabbit_max_retries self.interval_start = FLAGS.rabbit_retry_interval - self.interval_stepping = 0 + self.interval_stepping = FLAGS.rabbit_interval_stepping self.interval_max = FLAGS.rabbit_retry_interval self.params = dict(hostname=FLAGS.rabbit_host, -- cgit From 400427ab786779109d49b27eda2fe9e246503dd6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 26 Aug 2011 16:17:40 -0700 Subject: use db layer for aggregation --- nova/api/openstack/contrib/simple_tenant_usage.py | 72 ++++++-------------- nova/db/api.py | 11 +++- nova/db/sqlalchemy/api.py | 19 ++++-- .../openstack/contrib/test_simple_tenant_usage.py | 77 +++++++++------------- 4 files changed, 70 insertions(+), 109 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 5f4218237..16e712815 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -31,35 +31,7 @@ from webob import exc FLAGS = flags.FLAGS -INSTANCE_FIELDS = ['id', - 'image_ref', - 'project_id', - 'user_id', - 'display_name', - 'state_description', - 'instance_type_id', - 'launched_at', - 'terminated_at'] - - class SimpleTenantUsageController(object): - def _get_instances_for_time_period(self, period_start, period_stop, - tenant_id): - tenant_clause = '' - if tenant_id: - tenant_clause = " and project_id='%s'" % tenant_id - - conn = get_session().connection() - rows = conn.execute("select %s from instances where \ - (terminated_at is NULL or terminated_at > '%s') \ - and (launched_at < '%s') %s" %\ - (','.join(INSTANCE_FIELDS), - period_start.isoformat(' '),\ - period_stop.isoformat(' '), - tenant_clause)).fetchall() - - return rows - def _hours_for(self, instance, period_start, period_stop): launched_at = instance['launched_at'] terminated_at = instance['terminated_at'] @@ -99,62 +71,58 @@ class SimpleTenantUsageController(object): def _tenant_usages_for_period(self, context, period_start, period_stop, tenant_id=None, detailed=True): - rows = self._get_instances_for_time_period(period_start, - period_stop, 
- tenant_id) + instances = db.instance_get_active_by_window(context, + period_start, + period_stop, + tenant_id, + fast=True) + from nova import log as logging + logging.info(instances) rval = {} flavors = {} - for row in rows: + for instance in instances: info = {} - for i in range(len(INSTANCE_FIELDS)): - info[INSTANCE_FIELDS[i]] = row[i] - info['hours'] = self._hours_for(info, period_start, period_stop) - flavor_type = info['instance_type_id'] + info['hours'] = self._hours_for(instance, + period_start, + period_stop) + flavor_type = instance['instance_type_id'] if not flavors.get(flavor_type): try: flavors[flavor_type] = db.instance_type_get(context, - info['instance_type_id']) + flavor_type) except exception.InstanceTypeNotFound: # can't bill if there is no instance type continue flavor = flavors[flavor_type] - info['name'] = info['display_name'] - del(info['display_name']) + info['name'] = instance['display_name'] info['memory_mb'] = flavor['memory_mb'] info['local_gb'] = flavor['local_gb'] info['vcpus'] = flavor['vcpus'] - info['tenant_id'] = info['project_id'] - del(info['project_id']) + info['tenant_id'] = instance['project_id'] info['flavor'] = flavor['name'] - del(info['instance_type_id']) - info['started_at'] = info['launched_at'] - del(info['launched_at']) + info['started_at'] = instance['launched_at'] - info['ended_at'] = info['terminated_at'] - del(info['terminated_at']) + info['ended_at'] = instance['terminated_at'] if info['ended_at']: info['state'] = 'terminated' else: - info['state'] = info['state_description'] - - del(info['state_description']) + info['state'] = instance['state_description'] now = datetime.utcnow() if info['state'] == 'terminated': - delta = self._parse_datetime(info['ended_at'])\ - - self._parse_datetime(info['started_at']) + delta = info['ended_at'] - info['started_at'] else: - delta = now - self._parse_datetime(info['started_at']) + delta = now - info['started_at'] info['uptime'] = delta.days * 24 * 60 + delta.seconds diff --git a/nova/db/api.py b/nova/db/api.py index 3bb9b4970..f443239c1 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -495,9 +495,14 @@ def instance_get_all_by_filters(context, filters): return IMPL.instance_get_all_by_filters(context, filters) -def instance_get_active_by_window(context, begin, end=None): - """Get instances active during a certain time window.""" - return IMPL.instance_get_active_by_window(context, begin, end) +def instance_get_active_by_window(context, begin, end=None, + project_id=None, fast=False): + """Get instances active during a certain time window. + + Setting fast to true will stop all joinedloads. 
+ Specifying a project_id will filter for a certain project.""" + return IMPL.instance_get_active_by_window(context, begin, end, + project_id, fast) def instance_get_all_by_user(context, user_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index d1fbf8cab..2d50f458f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1307,20 +1307,25 @@ def instance_get_all_by_filters(context, filters): @require_admin_context -def instance_get_active_by_window(context, begin, end=None): +def instance_get_active_by_window(context, begin, end=None, + project_id=None, fast=False): """Return instances that were continuously active over the given window""" session = get_session() - query = session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')).\ - filter(models.Instance.launched_at < begin) + query = session.query(models.Instance) + if not fast: + query = query.options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')) + + query = query.filter(models.Instance.launched_at < begin) if end: query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > end)) else: query = query.filter(models.Instance.terminated_at == None) + if project_id: + query = query.filter_by(project_id=project_id) return query.all() diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py index d20e36aaf..2bd619820 100644 --- a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -23,13 +23,8 @@ from nova import context from nova import db from nova import flags from nova import test -from nova.compute import instance_types -from nova.db.sqlalchemy import models -from nova.db.sqlalchemy import session from nova.tests.api.openstack import fakes -from webob import exc -from nova.api.openstack.contrib import simple_tenant_usage FLAGS = flags.FLAGS @@ -44,27 +39,6 @@ STOP = datetime.datetime.utcnow() START = STOP - datetime.timedelta(hours=HOURS) -def fake_get_session(): - class FakeFetcher(object): - def fetchall(fetcher_self): - # return 10 rows, 2 tenants, 5 servers each, each run for 1 day - return [get_fake_db_row(START, - STOP, - x, - "faketenant_%s" % (x / SERVERS)) - for x in xrange(TENANTS * SERVERS)] - - class FakeConn(object): - def execute(self, query): - return FakeFetcher() - - class FakeSession(object): - def connection(self): - return FakeConn() - - return FakeSession() - - def fake_instance_type_get(context, instance_type_id): return {'id': 1, 'vcpus': VCPUS, @@ -74,25 +48,32 @@ def fake_instance_type_get(context, instance_type_id): 'fakeflavor'} -def get_fake_db_row(start, end, instance_id, tenant_id): - return [instance_id, - '1', - tenant_id, - 'fakeuser', - 'name', - 'state', - 1, - start, - None] +def get_fake_db_instance(start, end, instance_id, tenant_id): + return {'id': instance_id, + 'image_ref': '1', + 'project_id': tenant_id, + 'user_id': 'fakeuser', + 'display_name': 'name', + 'state_description': 'state', + 'instance_type_id': 1, + 'launched_at': start, + 'terminated_at': end} + +def fake_instance_get_active_by_window(context, begin, end, project_id, fast): + return [get_fake_db_instance(START, + 
STOP, + x, + "faketenant_%s" % (x / SERVERS)) + for x in xrange(TENANTS * SERVERS)] class SimpleTenantUsageTest(test.TestCase): def setUp(self): super(SimpleTenantUsageTest, self).setUp() - self.stubs.Set(session, "get_session", - fake_get_session) self.stubs.Set(db, "instance_type_get", fake_instance_type_get) + self.stubs.Set(db, "instance_get_active_by_window", + fake_instance_get_active_by_window) self.admin_context = context.RequestContext('fakeadmin_0', 'faketenant_0', is_admin=True) @@ -104,13 +85,9 @@ class SimpleTenantUsageTest(test.TestCase): is_admin=False) FLAGS.allow_admin_api = True - def test_verify_db_fields_exist_in_instance_model(self): - for field in simple_tenant_usage.INSTANCE_FIELDS: - self.assertTrue(field in models.Instance.__table__.columns) - def test_verify_index(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage?start=%s&end=%s' % + '/v1.1/123/os-simple-tenant-usage?start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -121,6 +98,8 @@ class SimpleTenantUsageTest(test.TestCase): self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) usages = res_dict['tenant_usages'] + from nova import log as logging + logging.warn(usages) for i in xrange(TENANTS): self.assertEqual(int(usages[i]['total_hours']), SERVERS * HOURS) @@ -134,7 +113,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_detailed_index(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' % + '/v1.1/123/os-simple-tenant-usage?' + 'detailed=1&start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -151,7 +131,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_index_fails_for_nonadmin(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage?detailed=1&start=%s&end=%s' % + '/v1.1/123/os-simple-tenant-usage?' 
+ 'detailed=1&start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -161,7 +142,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_show(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' % + '/v1.1/faketenant_0/os-simple-tenant-usage/' + 'faketenant_0?start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" @@ -179,7 +161,8 @@ class SimpleTenantUsageTest(test.TestCase): def test_verify_show_cant_view_other_tenant(self): req = webob.Request.blank( - '/v1.1/os-simple-tenant-usage/faketenant_0?start=%s&end=%s' % + '/v1.1/faketenant_1/os-simple-tenant-usage/' + 'faketenant_0?start=%s&end=%s' % (START.isoformat(), STOP.isoformat())) req.method = "GET" req.headers["content-type"] = "application/json" -- cgit From 470b9dc73c5e27ef8716436fe22e9f32dbdffd28 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 26 Aug 2011 17:40:22 -0700 Subject: add tests to verify NotFound exceptions are wrapped with the proper ids --- nova/tests/api/ec2/__init__.py | 19 +++++ nova/tests/api/ec2/test_middleware.py | 130 ++++++++++++++++++++++++++++++++++ nova/tests/test_cloud.py | 7 -- nova/tests/test_middleware.py | 85 ---------------------- 4 files changed, 149 insertions(+), 92 deletions(-) create mode 100644 nova/tests/api/ec2/__init__.py create mode 100644 nova/tests/api/ec2/test_middleware.py delete mode 100644 nova/tests/test_middleware.py diff --git a/nova/tests/api/ec2/__init__.py b/nova/tests/api/ec2/__init__.py new file mode 100644 index 000000000..6dab802f2 --- /dev/null +++ b/nova/tests/api/ec2/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Openstack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/api/ec2/test_middleware.py b/nova/tests/api/ec2/test_middleware.py new file mode 100644 index 000000000..295f6c4ea --- /dev/null +++ b/nova/tests/api/ec2/test_middleware.py @@ -0,0 +1,130 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
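A note on the pattern the new test module relies on: each case wraps a tiny WSGI app with the middleware under test and drives it with webob request objects, so no running server is needed. A minimal, self-contained sketch of that pattern, assuming only webob (fake_app is an illustrative name; it mirrors the conditional_forbid helper defined below):

    import webob
    import webob.dec
    import webob.exc

    @webob.dec.wsgify
    def fake_app(req):
        # Stand-in for the app behind the middleware under test: fail
        # with 403 when the request carries die=1, succeed otherwise.
        if req.params.get('die') == '1':
            raise webob.exc.HTTPForbidden()
        return 'OK'

    req = webob.Request.blank('/?AWSAccessKeyId=test&die=1')
    assert req.get_response(fake_app).status_int == 403

    req = webob.Request.blank('/?AWSAccessKeyId=test')
    assert req.get_response(fake_app).status_int == 200

The lockout tests below layer ec2.Lockout over exactly this kind of app and count how many 403 responses it takes to trip the lockout.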
+ +import webob +import webob.dec +import webob.exc + +from nova.api import ec2 +from nova import context +from nova import exception +from nova import flags +from nova import test +from nova import utils + +from xml.etree.ElementTree import fromstring as xml_to_tree + +FLAGS = flags.FLAGS + + +@webob.dec.wsgify +def conditional_forbid(req): + """Helper wsgi app returns 403 if param 'die' is 1.""" + if 'die' in req.params and req.params['die'] == '1': + raise webob.exc.HTTPForbidden() + return 'OK' + + +class LockoutTestCase(test.TestCase): + """Test case for the Lockout middleware.""" + def setUp(self): # pylint: disable=C0103 + super(LockoutTestCase, self).setUp() + utils.set_time_override() + self.lockout = ec2.Lockout(conditional_forbid) + + def tearDown(self): # pylint: disable=C0103 + utils.clear_time_override() + super(LockoutTestCase, self).tearDown() + + def _send_bad_attempts(self, access_key, num_attempts=1): + """Fail x.""" + for i in xrange(num_attempts): + req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) + self.assertEqual(req.get_response(self.lockout).status_int, 403) + + def _is_locked_out(self, access_key): + """Sends a test request to see if key is locked out.""" + req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) + return (req.get_response(self.lockout).status_int == 403) + + def test_lockout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test')) + + def test_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test')) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) + self.assertFalse(self._is_locked_out('test')) + + def test_multiple_keys(self): + self._send_bad_attempts('test1', FLAGS.lockout_attempts) + self.assertTrue(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + utils.advance_time_seconds(FLAGS.lockout_minutes * 60) + self.assertFalse(self._is_locked_out('test1')) + self.assertFalse(self._is_locked_out('test2')) + + def test_window_timeout(self): + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) + utils.advance_time_seconds(FLAGS.lockout_window * 60) + self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) + self.assertFalse(self._is_locked_out('test')) + + +class ExecutorTestCase(test.TestCase): + def setUp(self): + super(ExecutorTestCase, self).setUp() + self.executor = ec2.Executor() + + def _execute(self, invoke): + class Fake(object): + pass + fake_ec2_request = Fake() + fake_ec2_request.invoke = invoke + + fake_wsgi_request = Fake() + + fake_wsgi_request.environ = { + 'nova.context': context.get_admin_context(), + 'ec2.request': fake_ec2_request, + } + return self.executor(fake_wsgi_request) + + def _extract_message(self, result): + tree = xml_to_tree(result.body) + return tree.findall('./Errors')[0].find('Error/Message').text + + def test_instance_not_found(self): + def not_found(context): + raise exception.InstanceNotFound(instance_id=5) + result = self._execute(not_found) + self.assertIn('i-00000005', self._extract_message(result)) + + def test_snapshot_not_found(self): + def not_found(context): + raise exception.SnapshotNotFound(snapshot_id=5) + result = self._execute(not_found) + self.assertIn('snap-00000005', self._extract_message(result)) + + def test_volume_not_found(self): + def not_found(context): + raise exception.VolumeNotFound(volume_id=5) + result = self._execute(not_found) + 
self.assertIn('vol-00000005', self._extract_message(result)) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..1bf12a06f 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -86,13 +86,6 @@ class CloudTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) - def tearDown(self): - networks = db.project_get_networks(self.context, self.project_id, - associate=False) - for network in networks: - db.network_disassociate(self.context, network['id']) - super(CloudTestCase, self).tearDown() - def _create_key(self, name): # NOTE(vish): create depends on pool, so just call helper directly return cloud._gen_key(self.context, self.context.user_id, name) diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py deleted file mode 100644 index 40d117c45..000000000 --- a/nova/tests/test_middleware.py +++ /dev/null @@ -1,85 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import webob -import webob.dec -import webob.exc - -from nova.api import ec2 -from nova import flags -from nova import test -from nova import utils - - -FLAGS = flags.FLAGS - - -@webob.dec.wsgify -def conditional_forbid(req): - """Helper wsgi app returns 403 if param 'die' is 1.""" - if 'die' in req.params and req.params['die'] == '1': - raise webob.exc.HTTPForbidden() - return 'OK' - - -class LockoutTestCase(test.TestCase): - """Test case for the Lockout middleware.""" - def setUp(self): # pylint: disable=C0103 - super(LockoutTestCase, self).setUp() - utils.set_time_override() - self.lockout = ec2.Lockout(conditional_forbid) - - def tearDown(self): # pylint: disable=C0103 - utils.clear_time_override() - super(LockoutTestCase, self).tearDown() - - def _send_bad_attempts(self, access_key, num_attempts=1): - """Fail x.""" - for i in xrange(num_attempts): - req = webob.Request.blank('/?AWSAccessKeyId=%s&die=1' % access_key) - self.assertEqual(req.get_response(self.lockout).status_int, 403) - - def _is_locked_out(self, access_key): - """Sends a test request to see if key is locked out.""" - req = webob.Request.blank('/?AWSAccessKeyId=%s' % access_key) - return (req.get_response(self.lockout).status_int == 403) - - def test_lockout(self): - self._send_bad_attempts('test', FLAGS.lockout_attempts) - self.assertTrue(self._is_locked_out('test')) - - def test_timeout(self): - self._send_bad_attempts('test', FLAGS.lockout_attempts) - self.assertTrue(self._is_locked_out('test')) - utils.advance_time_seconds(FLAGS.lockout_minutes * 60) - self.assertFalse(self._is_locked_out('test')) - - def test_multiple_keys(self): - self._send_bad_attempts('test1', FLAGS.lockout_attempts) - self.assertTrue(self._is_locked_out('test1')) - self.assertFalse(self._is_locked_out('test2')) - utils.advance_time_seconds(FLAGS.lockout_minutes * 60) - 
self.assertFalse(self._is_locked_out('test1')) - self.assertFalse(self._is_locked_out('test2')) - - def test_window_timeout(self): - self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) - self.assertFalse(self._is_locked_out('test')) - utils.advance_time_seconds(FLAGS.lockout_window * 60) - self._send_bad_attempts('test', FLAGS.lockout_attempts - 1) - self.assertFalse(self._is_locked_out('test')) -- cgit From 19cb3672f2849fe659173631f7f81ed489f1ea7e Mon Sep 17 00:00:00 2001 From: Jesse Andrews Date: Fri, 26 Aug 2011 17:46:47 -0700 Subject: v1.0 of server create injects first users keypair --- nova/api/openstack/create_instance_helper.py | 1 - nova/api/openstack/servers.py | 13 +++++++++++++ nova/tests/api/openstack/test_servers.py | 6 ++++-- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index e7428bf41..d82cb534f 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -19,7 +19,6 @@ import base64 from webob import exc from xml.dom import minidom -from nova import db from nova import exception from nova import flags from nova import log as logging diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ad563b771..f288f2228 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -22,6 +22,7 @@ from xml.dom import minidom import webob from nova import compute +from nova import db from nova import exception from nova import flags from nova import log as logging @@ -563,6 +564,18 @@ class ControllerV10(Controller): raise exc.HTTPNotFound() return webob.Response(status_int=202) + def create(self, req, body): + """ Creates a new server for a given user """ + # note(ja): v1.0 injects the first keypair for the project for testing + if 'server' in body and not 'key_name' in body['server']: + context = req.environ["nova.context"] + keypairs = db.key_pair_get_all_by_user(context.elevated(), + context.user_id) + if keypairs: + body['server']['key_name'] = keypairs[0]['name'] + + return super(ControllerV10, self).create(req, body) + def _image_ref_from_req_data(self, data): return data['server']['imageId'] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index bb531f462..c54bead49 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1761,7 +1761,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageRef=image_href, flavorRef=flavor_ref, key_name='nonexistentkey')) - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/fake/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -1781,7 +1781,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageRef=image_href, flavorRef=flavor_ref, key_name='mykey')) - req = webob.Request.blank('/v1.1/servers') + req = webob.Request.blank('/v1.1/fake/servers') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" @@ -3793,6 +3793,7 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + "key_name": "", "status": "BUILD", "hostId": '', "image": { @@ -3848,6 +3849,7 @@ class ServersViewBuilderV11Test(test.TestCase): "created": "2010-10-10T12:00:00Z", "progress": 0, "name": "test_server", + 
"key_name": "", "status": "BUILD", "hostId": '', "image": { -- cgit From 3a4ee30de0f619b7046e90ce9b6978e3a6dd20a2 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sat, 27 Aug 2011 14:07:55 -0700 Subject: Default rabbit max_retries to forever Modify carrot code to handle retry backoffs and obey max_retries = forever Fix some kombu issues from cut-n-paste Service should make sure to close the RPC connection --- nova/flags.py | 6 +++--- nova/rpc/impl_carrot.py | 28 +++++++++++++++++++++------- nova/rpc/impl_kombu.py | 35 +++++++++++++++++------------------ nova/service.py | 6 ++++++ 4 files changed, 47 insertions(+), 28 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index ac70386e7..e09b4721a 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -302,9 +302,9 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') -DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') -DEFINE_integer('rabbit_interval_stepping', 2, 'rabbit connection retry backoff in seconds') -DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') +DEFINE_integer('rabbit_retry_interval', 1, 'rabbit connection retry interval to start') +DEFINE_integer('rabbit_retry_backoff', 2, 'rabbit connection retry backoff in seconds') +DEFINE_integer('rabbit_max_retries', 0, 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 529f98722..117489bc6 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -119,25 +119,34 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): - for i in xrange(FLAGS.rabbit_max_retries): - if i > 0: - time.sleep(FLAGS.rabbit_retry_interval) + max_retries = FALGS.rabbit_max_retries + sleep_time = FLAGS.rabbit_retry_interval + tries = 0 + while True: + tries += 1 + if tries > 1: + time.sleep(sleep_time) + # backoff for next retry attempt.. if there is one + sleep_time += FLAGS.rabbit_retry_backoff + if sleep_time > 30: + sleep_time = 30 try: super(Consumer, self).__init__(*args, **kwargs) self.failed_connection = False break except Exception as e: # Catching all because carrot sucks + self.failed_connection = True + if max_retries > 0 and tries == max_retries: + break fl_host = FLAGS.rabbit_host fl_port = FLAGS.rabbit_port - fl_intv = FLAGS.rabbit_retry_interval + fl_intv = sleep_time LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' ' unreachable: %(e)s. Trying again in %(fl_intv)d' ' seconds.') % locals()) - self.failed_connection = True if self.failed_connection: LOG.error(_('Unable to connect to AMQP server ' - 'after %d tries. Shutting down.'), - FLAGS.rabbit_max_retries) + 'after %(tries)d tries. 
Shutting down.') % locals()) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -520,6 +529,11 @@ class MulticallWaiter(object): yield result +def create_connection(new=True): + """Create a connection""" + return Connection.instance(new=new) + + def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls methods in the proxy""" if fanout: diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 65199808e..db839dd2a 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -288,9 +288,13 @@ class Connection(object): def __init__(self): self.queues = [] self.max_retries = FLAGS.rabbit_max_retries + # Try forever? + if self.max_retries <= 0: + self.max_retries = None self.interval_start = FLAGS.rabbit_retry_interval - self.interval_stepping = FLAGS.rabbit_interval_stepping - self.interval_max = FLAGS.rabbit_retry_interval + self.interval_stepping = FLAGS.rabbit_retry_backoff + # max retry-interval = 30 seconds + self.interval_max = 30 self.params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -302,16 +306,6 @@ class Connection(object): self.connection = None self.reconnect() - @classmethod - def instance(cls, new=True): - """Returns the instance.""" - if new or not hasattr(cls, '_instance'): - if new: - return cls() - else: - cls._instance = cls() - return cls._instance - def reconnect(self): """Handles reconnecting and re-estblishing queues""" if self.connection: @@ -330,12 +324,12 @@ class Connection(object): interval_step=self.interval_stepping, interval_max=self.interval_max) except self.connection.connection_errors, e: + # We should only get here if max_retries is set. We'll go + # ahead and exit in this case. err_str = str(e) - max_retries = FLAGS.rabbit_max_retries + max_retries = self.max_retries LOG.error(_('Unable to connect to AMQP server ' 'after %(max_retries)d tries: %(err_str)s') % locals()) - # NOTE(comstud): Original carrot code exits after so many - # attempts, but I wonder if we should re-try indefinitely sys.exit(1) LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) @@ -448,7 +442,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): LOG.debug('Creating new connection') - return RPCIMPL.Connection() + return Connection() # Create a ConnectionPool to use for RPC calls. We'll order the # pool as a stack (LIFO), so that we can potentially loop through and @@ -464,7 +458,7 @@ class ConnectionContext(object): if pooled: self.connection = ConnectionPool.get() else: - self.connection = RPCIMPL.Connection() + self.connection = Connection() self.pooled = pooled def __enter__(self): @@ -636,6 +630,11 @@ class MulticallWaiter(object): yield result +def create_connection(new=True): + """Create a connection""" + return ConnectionContext(pooled=not new) + + def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" if fanout: @@ -649,7 +648,7 @@ def create_consumer_set(conn, consumers): # Returns an object that you can call .wait() on to consume # all queues? # Needs to have a .close() which will stop consuming? - # Needs to also have an attach_to_eventlet method for tests? + # Needs to also have an method for tests? 
raise NotImplemented diff --git a/nova/service.py b/nova/service.py index 959e79052..a872a36ee 100644 --- a/nova/service.py +++ b/nova/service.py @@ -242,6 +242,12 @@ class Service(object): self.consumer_set_thread.wait() except greenlet.GreenletExit: pass + # Try to shut the connection down, but if we get any sort of + # errors, go ahead and ignore them.. as we're shutting down anyway + try: + self.conn.close() + except Exception: + pass for x in self.timers: try: x.stop() -- cgit From 4faaf9c37d030d68cfea818d396963e3ed7deeaa Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sat, 27 Aug 2011 21:33:14 -0700 Subject: fix FALGS typo --- nova/rpc/impl_carrot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 117489bc6..40097e10e 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -119,7 +119,7 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): - max_retries = FALGS.rabbit_max_retries + max_retries = FLAGS.rabbit_max_retries sleep_time = FLAGS.rabbit_retry_interval tries = 0 while True: -- cgit From 75c7c841379341c63598850e4676f2146d63334a Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 28 Aug 2011 16:17:17 +0530 Subject: Bug #835964: pep8 violations in IPv6 code Fix pep8 violations. --- nova/ipv6/account_identifier.py | 3 ++- nova/tests/test_ipv6.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py index 27bb01988..8a08510ac 100644 --- a/nova/ipv6/account_identifier.py +++ b/nova/ipv6/account_identifier.py @@ -39,7 +39,8 @@ def to_global(prefix, mac, project_id): except TypeError: raise TypeError(_('Bad prefix for to_global_ipv6: %s') % prefix) except NameError: - raise TypeError(_('Bad project_id for to_global_ipv6: %s') % project_id) + raise TypeError(_('Bad project_id for to_global_ipv6: %s') % + project_id) def to_mac(ipv6_address): diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py index 04c1b5598..e1ba4aafb 100644 --- a/nova/tests/test_ipv6.py +++ b/nova/tests/test_ipv6.py @@ -48,7 +48,7 @@ class IPv6RFC2462TestCase(test.TestCase): def test_to_global_with_bad_prefix(self): bad_prefix = '82' self.assertRaises(TypeError, ipv6.to_global, - bad_prefix, + bad_prefix, '2001:db8::216:3eff:fe33:4455', 'test') -- cgit From 07cbdbedcab3e796f330e21b1ffe407bd646ae67 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 28 Aug 2011 16:19:55 +0530 Subject: Bug #835952: pep8 failures do not cause the tests to fail Add set -eu to run_tests.sh. This will cause it to fail whenever anything goes wrong, which is exactly what we want in a test script. To do this, I had to remove the use of the "let" keyword, which has a bizarre exit status in bash. I also removed the "|| exit" after run_tests, which means that this script will now exit with status 1, not status 0, if run_tests fails. --- run_tests.sh | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index 871332b4a..c1fda4cf9 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -eu + function usage { echo "Usage: $0 [OPTION]..." 
echo "Run Nova's test suite(s)" @@ -24,13 +26,13 @@ function usage { function process_option { case "$1" in -h|--help) usage;; - -V|--virtual-env) let always_venv=1; let never_venv=0;; - -N|--no-virtual-env) let always_venv=0; let never_venv=1;; - -r|--recreate-db) let recreate_db=1;; - -n|--no-recreate-db) let recreate_db=0;; - -f|--force) let force=1;; - -p|--pep8) let just_pep8=1;; - -c|--coverage) let coverage=1;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -p|--pep8) just_pep8=1;; + -c|--coverage) coverage=1;; -*) noseopts="$noseopts $1";; *) noseargs="$noseargs $1" esac @@ -130,7 +132,7 @@ if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi -run_tests || exit +run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to -- cgit From 17c0a1aad98effa4554a9083b35185f2d430d81f Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sun, 28 Aug 2011 23:40:09 +0530 Subject: In the XenAPI simulator, set VM.domid, when creating the instance initially, and when starting the VM. This shows up in the logs for Bug #831599, but this fix doesn't actually fix the hang. --- nova/tests/xenapi/stubs.py | 4 ++++ nova/virt/xenapi/fake.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index a6a1febd6..d309cfb88 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -18,6 +18,8 @@ import eventlet import json +import random + from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -191,6 +193,7 @@ class FakeSessionForVMTests(fake.SessionBase): vm['power_state'] = 'Running' vm['is_a_template'] = False vm['is_control_domain'] = False + vm['domid'] = random.randrange(0, 1 << 16) def VM_snapshot(self, session_ref, vm_ref, label): status = "Running" @@ -290,6 +293,7 @@ class FakeSessionForMigrationTests(fake.SessionBase): vm['power_state'] = 'Running' vm['is_a_template'] = False vm['is_control_domain'] = False + vm['domid'] = random.randrange(0, 1 << 16) def stub_out_migration_methods(stubs): diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 7c91aa9b9..399f6ec54 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -51,6 +51,7 @@ A fake XenAPI SDK. """ +import random import uuid from pprint import pformat @@ -103,8 +104,10 @@ def create_network(name_label, bridge): def create_vm(name_label, status, is_a_template=False, is_control_domain=False): + domid = status == 'Running' and random.randrange(0, 1 << 16) or -1 return _create_object('VM', {'name_label': name_label, + 'domid': domid, 'power-state': status, 'is_a_template': is_a_template, 'is_control_domain': is_control_domain}) -- cgit From c35086ca2d2ac1a519442217bf89c0572b28053b Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Mon, 29 Aug 2011 00:41:14 +0530 Subject: Start domid's at 1, not 0, to avoid any confusion with dom0. 
--- nova/tests/xenapi/stubs.py | 4 ++-- nova/virt/xenapi/fake.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index d309cfb88..647a4c1df 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -193,7 +193,7 @@ class FakeSessionForVMTests(fake.SessionBase): vm['power_state'] = 'Running' vm['is_a_template'] = False vm['is_control_domain'] = False - vm['domid'] = random.randrange(0, 1 << 16) + vm['domid'] = random.randrange(1, 1 << 16) def VM_snapshot(self, session_ref, vm_ref, label): status = "Running" @@ -293,7 +293,7 @@ class FakeSessionForMigrationTests(fake.SessionBase): vm['power_state'] = 'Running' vm['is_a_template'] = False vm['is_control_domain'] = False - vm['domid'] = random.randrange(0, 1 << 16) + vm['domid'] = random.randrange(1, 1 << 16) def stub_out_migration_methods(stubs): diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 399f6ec54..97dfd9fa9 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -104,7 +104,7 @@ def create_network(name_label, bridge): def create_vm(name_label, status, is_a_template=False, is_control_domain=False): - domid = status == 'Running' and random.randrange(0, 1 << 16) or -1 + domid = status == 'Running' and random.randrange(1, 1 << 16) or -1 return _create_object('VM', {'name_label': name_label, 'domid': domid, -- cgit From 256cb956abeff85f3cddce499b488dd112c4137d Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 17:33:11 -0700 Subject: start to rework some consumer stuff --- nova/rpc/impl_kombu.py | 127 +++++++++++++++++++++++++++++-------------------- nova/service.py | 24 ++-------- 2 files changed, 81 insertions(+), 70 deletions(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index db839dd2a..01871606c 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -35,11 +35,11 @@ flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool') -class QueueBase(object): - """Queue base class.""" +class ConsumerBase(object): + """Consumer base class.""" def __init__(self, channel, callback, tag, **kwargs): - """Init the queue. + """Declare a queue on an amqp channel. 'channel' is the amqp channel to use 'callback' is the callback to call when messages are received @@ -55,20 +55,21 @@ class QueueBase(object): self.reconnect(channel) def reconnect(self, channel): - """Re-create the queue after a rabbit reconnect""" + """Re-declare the queue after a rabbit reconnect""" self.channel = channel self.kwargs['channel'] = channel self.queue = kombu.entity.Queue(**self.kwargs) self.queue.declare() def consume(self, *args, **kwargs): - """Consume from this queue. + """Actually declare the consumer on the amqp channel. This will + start the flow of messages from the queue. Using the + Connection.iterconsume() iterator will process the messages, + calling the appropriate callback. + If a callback is specified in kwargs, use that. Otherwise, use the callback passed during __init__() - The callback will be called if a message was read off of the - queue. - If kwargs['nowait'] is True, then this call will block until a message is read. 
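As a minimal sketch of the flow this docstring describes, following the same sequence the rpc tests later in this series use (the topic name and receiver are illustrative):

    from nova import context
    from nova import rpc

    class Receiver(object):
        @staticmethod
        def echo(ctxt, value):
            # Target method the consumer's callback dispatches to.
            return value

    conn = rpc.create_connection(new=True)
    rpc.create_consumer(conn, 'demo_topic', Receiver(), fanout=False)
    conn.consume_in_thread()    # drain all declared consumers in a greenthread

    ctxt = context.get_admin_context()
    result = rpc.call(ctxt, 'demo_topic', {"method": "echo",
                                           "args": {"value": 42}})
    assert result == 42
    conn.close()                # also cancels the consumer thread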
@@ -100,7 +101,7 @@ class QueueBase(object): self.queue = None -class DirectQueue(QueueBase): +class DirectConsumer(ConsumerBase): """Queue/consumer class for 'direct'""" def __init__(self, channel, msg_id, callback, tag, **kwargs): @@ -123,7 +124,7 @@ class DirectQueue(QueueBase): type='direct', durable=options['durable'], auto_delete=options['auto_delete']) - super(DirectQueue, self).__init__( + super(DirectConsumer, self).__init__( channel, callback, tag, @@ -133,8 +134,8 @@ class DirectQueue(QueueBase): **options) -class TopicQueue(QueueBase): - """Queue/consumer class for 'topic'""" +class TopicConsumer(ConsumerBase): + """Consumer class for 'topic'""" def __init__(self, channel, topic, callback, tag, **kwargs): """Init a 'topic' queue. @@ -156,7 +157,7 @@ class TopicQueue(QueueBase): type='topic', durable=options['durable'], auto_delete=options['auto_delete']) - super(TopicQueue, self).__init__( + super(TopicConsumer, self).__init__( channel, callback, tag, @@ -166,8 +167,8 @@ class TopicQueue(QueueBase): **options) -class FanoutQueue(QueueBase): - """Queue/consumer class for 'fanout'""" +class FanoutConsumer(ConsumerBase): + """Consumer class for 'fanout'""" def __init__(self, channel, topic, callback, tag, **kwargs): """Init a 'fanout' queue. @@ -193,7 +194,7 @@ class FanoutQueue(QueueBase): type='fanout', durable=options['durable'], auto_delete=options['auto_delete']) - super(FanoutQueue, self).__init__( + super(FanoutConsumer, self).__init__( channel, callback, tag, @@ -286,7 +287,8 @@ class Connection(object): """Connection instance object.""" def __init__(self): - self.queues = [] + self.consumers = [] + self.consumer_thread = None self.max_retries = FLAGS.rabbit_max_retries # Try forever? if self.max_retries <= 0: @@ -334,9 +336,9 @@ class Connection(object): LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) self.channel = self.connection.channel() - for consumer in self.queues: + for consumer in self.consumers: consumer.reconnect(self.channel) - if self.queues: + if self.consumers: LOG.debug(_("Re-established AMQP queues")) def get_channel(self): @@ -354,30 +356,32 @@ class Connection(object): def close(self): """Close/release this connection""" + self.cancel_consumer_thread() self.connection.release() self.connection = None def reset(self): """Reset a connection so it can be used again""" + self.cancel_consumer_thread() self.channel.close() self.channel = self.connection.channel() - self.queues = [] + self.consumers = [] - def create_queue(self, queue_cls, topic, callback): - """Create a queue using the class that was passed in and - add it to our list of queues used for consuming + def declare_consumer(self, consumer_cls, topic, callback): + """Create a Consumer using the class that was passed in and + add it to our list of consumers """ - queue = queue_cls(self.channel, topic, callback, - self.queue_num.next()) - self.queues.append(queue) - return queue + consumer = consumer_cls(self.channel, topic, callback, + self.consumer_num.next()) + self.consumers.append(consumer) + return consumer - def consume(self, limit=None): - """Consume from all queues""" + def iterconsume(self, limit=None): + """Return an iterator that will consume from all queues/consumers""" while True: try: - queues_head = self.queues[:-1] - queues_tail = self.queues[-1] + queues_head = self.consumers[:-1] + queues_tail = self.consumers[-1] for queue in queues_head: queue.consume(nowait=True) queues_tail.consume(nowait=False) @@ -391,6 +395,36 @@ class Connection(object): '%s' 
% str(e))) self.reconnect() + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if not self.consumer_thread: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self.consumer_thread: + self.consumer_thread.kill() + try: + self.consumer_thread.wait() + except greenlet.GreenletExit: + pass + self.consumer_thread = None + def publisher_send(self, cls, topic, msg): """Send to a publisher based on the publisher class""" while True: @@ -408,20 +442,20 @@ class Connection(object): except self.connection.connection_errors, e: pass - def direct_consumer(self, topic, callback): + def declare_direct_consumer(self, topic, callback): """Create a 'direct' queue. In nova's use, this is generally a msg_id queue used for responses for call/multicall """ - return self.create_queue(DirectQueue, topic, callback) + self.declare_consumer(DirectConsumer, topic, callback) - def topic_consumer(self, topic, callback=None): - """Create a 'topic' queue.""" - return self.create_queue(TopicQueue, topic, callback) + def declare_topic_consumer(self, topic, callback=None): + """Create a 'topic' consumer.""" + self.declare_consumer(TopicConsumer, topic, callback) - def fanout_consumer(self, topic, callback): - """Create a 'fanout' queue""" - return self.create_queue(FanoutQueue, topic, callback) + def declare_fanout_consumer(self, topic, callback): + """Create a 'fanout' consumer""" + self.declare_consumer(FanoutConsumer, topic, callback) def direct_send(self, msg_id, msg): """Send a 'direct' message""" @@ -638,18 +672,9 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): """Create a consumer that calls a method in a proxy object""" if fanout: - return conn.fanout_consumer(topic, ProxyCallback(proxy)) + conn.declare_fanout_consumer(topic, ProxyCallback(proxy)) else: - return conn.topic_consumer(topic, ProxyCallback(proxy)) - - -def create_consumer_set(conn, consumers): - # FIXME(comstud): Replace this however necessary - # Returns an object that you can call .wait() on to consume - # all queues? - # Needs to have a .close() which will stop consuming? - # Needs to also have an method for tests? 
- raise NotImplemented + conn.declare_topic_consumer(topic, ProxyCallback(proxy)) def multicall(context, topic, msg): @@ -666,7 +691,7 @@ def multicall(context, topic, msg): conn = ConnectionContext() wait_msg = MulticallWaiter(conn) - conn.direct_consumer(msg_id, wait_msg) + conn.declare_direct_consumer(msg_id, wait_msg) conn.topic_send(topic, msg) return wait_msg diff --git a/nova/service.py b/nova/service.py index a872a36ee..ab7925eb3 100644 --- a/nova/service.py +++ b/nova/service.py @@ -153,26 +153,17 @@ class Service(object): self.topic) # Share this same connection for these Consumers - consumer_all = rpc.create_consumer(self.conn, self.topic, self, + rpc.create_consumer(self.conn, self.topic, self, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - consumer_node = rpc.create_consumer(self.conn, node_topic, self, + rpc.create_consumer(self.conn, node_topic, self, fanout=False) - fanout = rpc.create_consumer(self.conn, self.topic, self, fanout=True) + rpc.create_consumer(self.conn, self.topic, self, fanout=True) - consumers = [consumer_all, consumer_node, fanout] - consumer_set = rpc.create_consumer_set(self.conn, consumers) - - # Wait forever, processing these consumers - def _wait(): - try: - consumer_set.wait() - finally: - consumer_set.close() - - self.consumer_set_thread = eventlet.spawn(_wait) + # Consume from all consumers in a thread + self.conn.consume_in_thread() if self.report_interval: pulse = utils.LoopingCall(self.report_state) @@ -237,11 +228,6 @@ class Service(object): logging.warn(_('Service killed that has no database entry')) def stop(self): - self.consumer_set_thread.kill() - try: - self.consumer_set_thread.wait() - except greenlet.GreenletExit: - pass # Try to shut the connection down, but if we get any sort of # errors, go ahead and ignore them.. 
as we're shutting down anyway try: -- cgit From 32943729861ba4ad562e899a55af77b7974af8db Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:17:21 -0700 Subject: fix test_rpc and kombu stuff --- nova/rpc/FIXME | 2 -- nova/rpc/__init__.py | 7 +------ nova/rpc/impl_carrot.py | 56 +++++++++++++++++++++++++++++++++++++++++-------- nova/rpc/impl_kombu.py | 21 ++++++++++++++----- nova/tests/test_rpc.py | 25 +++++++++++++--------- 5 files changed, 79 insertions(+), 32 deletions(-) delete mode 100644 nova/rpc/FIXME diff --git a/nova/rpc/FIXME b/nova/rpc/FIXME deleted file mode 100644 index 704081802..000000000 --- a/nova/rpc/FIXME +++ /dev/null @@ -1,2 +0,0 @@ -Move some code duplication between carrot/kombu into common.py -The other FIXMEs in __init__.py and impl_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 9371c2ab3..10b69c8b5 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -38,18 +38,13 @@ RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, def create_connection(new=True): - return RPCIMPL.Connection.instance(new=True) + return RPCIMPL.create_connection(new=new) def create_consumer(conn, topic, proxy, fanout=False): return RPCIMPL.create_consumer(conn, topic, proxy, fanout) -def create_consumer_set(conn, consumers): - # FIXME(comstud): replace however necessary - return RPCIMPL.ConsumerSet(connection=conn, consumer_list=consumers) - - def call(context, topic, msg): return RPCIMPL.call(context, topic, msg) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 40097e10e..efff788a0 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -33,6 +33,7 @@ import uuid from carrot import connection as carrot_connection from carrot import messaging +import eventlet from eventlet import greenpool from eventlet import pools from eventlet import queue @@ -42,10 +43,10 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags -from nova import log as logging -from nova import utils from nova.rpc.common import RemoteError, LOG +# Needed for tests +eventlet.monkey_patch() FLAGS = flags.FLAGS flags.DEFINE_integer('rpc_thread_pool_size', 1024, @@ -57,6 +58,11 @@ flags.DEFINE_integer('rpc_conn_pool_size', 30, class Connection(carrot_connection.BrokerConnection): """Connection instance object.""" + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + self._rpc_consumers = [] + self._rpc_consumer_thread = None + @classmethod def instance(cls, new=True): """Returns the instance.""" @@ -94,6 +100,42 @@ class Connection(carrot_connection.BrokerConnection): pass return cls.instance() + def close(self): + self.cancel_consumer_thread() + for consumer in self._rpc_consumers: + try: + consumer.close() + except Exception: + # ignore all errors + pass + self._rpc_consumers = [] + super(Connection, self).close() + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + + consumer_set = ConsumerSet(connection=self, + consumer_list=self._rpc_consumers) + + def _consumer_thread(): + try: + consumer_set.wait() + except greenlet.GreenletExit: + return + if not self._rpc_consumer_thread: + self._rpc_consumer_thread = eventlet.spawn(_consumer_thread) + return self._rpc_consumer_thread + + def cancel_consumer_thread(self): + """Cancel a consumer thread""" + if self._rpc_consumer_thread: + self._rpc_consumer_thread.kill() + try: + self._rpc_consumer_thread.wait() + except greenlet.GreenletExit: + pass + 
self._rpc_consumer_thread = None + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -119,6 +161,7 @@ class Consumer(messaging.Consumer): """ def __init__(self, *args, **kwargs): + connection = kwargs.get('connection') max_retries = FLAGS.rabbit_max_retries sleep_time = FLAGS.rabbit_retry_interval tries = 0 @@ -148,6 +191,7 @@ class Consumer(messaging.Consumer): LOG.error(_('Unable to connect to AMQP server ' 'after %(tries)d tries. Shutting down.') % locals()) sys.exit(1) + connection._rpc_consumers.append(self) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): """Wraps the parent fetch with some logic for failed connection.""" @@ -175,12 +219,6 @@ class Consumer(messaging.Consumer): LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True - def attach_to_eventlet(self): - """Only needed for unit tests!""" - timer = utils.LoopingCall(self.fetch, enable_callbacks=True) - timer.start(0.1) - return timer - class AdapterConsumer(Consumer): """Calls methods on a proxy object based on method and args.""" @@ -251,7 +289,7 @@ class AdapterConsumer(Consumer): # NOTE(vish): this iterates through the generator list(rval) except Exception as e: - logging.exception('Exception during message handling') + LOG.exception('Exception during message handling') if msg_id: msg_reply(msg_id, None, sys.exc_info()) return diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 01871606c..bd83bc520 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -14,9 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -from nova import flags -from nova.rpc.common import RemoteError, LOG - import kombu import kombu.entity import kombu.messaging @@ -24,8 +21,22 @@ import kombu.connection import itertools import sys import time +import traceback +import types import uuid +import eventlet +from eventlet import greenpool +from eventlet import pools +import greenlet + +from nova import context +from nova import exception +from nova import flags +from nova.rpc.common import RemoteError, LOG + +# Needed for tests +eventlet.monkey_patch() FLAGS = flags.FLAGS @@ -317,7 +328,7 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.Connection(**self.params) - self.queue_num = itertools.count(1) + self.consumer_num = itertools.count(1) try: self.connection.ensure_connection(errback=self.connect_error, @@ -634,7 +645,7 @@ class RpcContext(context.RequestContext): class MulticallWaiter(object): def __init__(self, connection): self._connection = connection - self._iterator = connection.consume() + self._iterator = connection.iterconsume() self._result = None self._done = False diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ba9c0a859..2b9922491 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -33,13 +33,17 @@ class RpcTestCase(test.TestCase): super(RpcTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() self.context = context.get_admin_context() + def tearDown(self): + self.conn.close() + super(RpcTestCase, self).tearDown() + def test_call_succeed(self): value = 42 result = rpc.call(self.context, 'test', {"method": "echo", @@ -139,16 +143,17 @@ class 
RpcTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - consumer = rpc.create_consumer(conn, - 'nested', - nested, - False) - consumer.attach_to_eventlet() + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() value = 42 result = rpc.call(self.context, 'nested', {"method": "echo", "args": {"queue": "test", "value": value}}) + conn.close() self.assertEqual(value, result) -- cgit From e5310d666f167efe6e3c9f97176d13801489fdfd Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:18:40 -0700 Subject: fix nova-ajax-console-proxy --- bin/nova-ajax-console-proxy | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy index 0a789b4b9..b3205ec56 100755 --- a/bin/nova-ajax-console-proxy +++ b/bin/nova-ajax-console-proxy @@ -113,11 +113,11 @@ class AjaxConsoleProxy(object): AjaxConsoleProxy.tokens[kwargs['token']] = \ {'args': kwargs, 'last_activity': time.time()} - conn = rpc.create_connection(new=True) - consumer = rpc.create_consumer( - conn, - FLAGS.ajax_console_proxy_topic, - TopicProxy) + self.conn = rpc.create_connection(new=True) + rpc.create_consumer( + self.conn, + FLAGS.ajax_console_proxy_topic, + TopicProxy) def delete_expired_tokens(): now = time.time() @@ -129,7 +129,7 @@ class AjaxConsoleProxy(object): for k in to_delete: del AjaxConsoleProxy.tokens[k] - utils.LoopingCall(consumer.fetch, enable_callbacks=True).start(0.1) + self.conn.consume_in_thread() utils.LoopingCall(delete_expired_tokens).start(1) if __name__ == '__main__': @@ -142,3 +142,4 @@ if __name__ == '__main__': server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port) service.serve(server) service.wait() + self.conn.close() -- cgit From da11af2893719677a9113ce391d37b0dada6585c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 18:19:41 -0700 Subject: fix nova/tests/test_test.py --- nova/tests/test_test.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index 64f11fa45..6075abbb0 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -40,6 +40,6 @@ class IsolationTestCase(test.TestCase): connection = rpc.create_connection(new=True) proxy = NeverCalled() - consumer = rpc.create_consumer(connection, 'compute', - proxy, fanout=False) - consumer.attach_to_eventlet() + rpc.create_consumer(connection, 'compute', + proxy, fanout=False) + connection.consume_in_thread() -- cgit From 6b6de435efb83fc88c885c459fb70c46c646be84 Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Sun, 28 Aug 2011 22:02:43 -0400 Subject: Update the EC2 ToToken middleware to use eventlet.green.httplib instead of httplib2. Fixes issues where the JSON request body wasn't getting sent to Keystone. --- nova/api/ec2/__init__.py | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 5430f443d..ec4743cea 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,7 +20,10 @@ Starting point for routing EC2 requests. 
""" -import httplib2 +from urlparse import urlparse + +import eventlet +from eventlet.green import httplib import webob import webob.dec import webob.exc @@ -35,7 +38,6 @@ from nova.api.ec2 import apirequest from nova.api.ec2 import ec2utils from nova.auth import manager - FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api") flags.DEFINE_integer('lockout_attempts', 5, @@ -158,7 +160,6 @@ class ToToken(wsgi.Middleware): auth_params.pop('Signature') # Authenticate the request. - client = httplib2.Http() creds = {'ec2Credentials': {'access': access, 'signature': signature, 'host': req.host, @@ -166,18 +167,24 @@ class ToToken(wsgi.Middleware): 'path': req.path, 'params': auth_params, }} - headers = {'Content-Type': 'application/json'}, - resp, content = client.request(FLAGS.keystone_ec2_url, - 'POST', - headers=headers, - body=utils.dumps(creds)) + creds_json = utils.dumps(creds) + headers = {'Content-Type': 'application/json'} + o = urlparse(FLAGS.keystone_ec2_url) + if o.scheme == "http": + conn = httplib.HTTPConnection(o.netloc) + else: + conn = httplib.HTTPSConnection(o.netloc) + conn.request('POST', o.path, body=creds_json, headers=headers) + response = conn.getresponse().read() + conn.close() + # NOTE(vish): We could save a call to keystone by # having keystone return token, tenant, # user, and roles from this call. - result = utils.loads(content) + result = utils.loads(response) # TODO(vish): check for errors - token_id = result['auth']['token']['id'] + token_id = result['auth']['token']['id'] # Authenticated! req.headers['X-Auth-Token'] = token_id return self.application -- cgit From 6fbb35d596f670d6dcdda2486a12fc09ef9be853 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:22:53 -0700 Subject: add carrot/kombu tests... small thread fix for kombu --- nova/rpc/__init__.py | 4 +- nova/rpc/common.py | 6 + nova/rpc/impl_carrot.py | 4 - nova/rpc/impl_kombu.py | 12 +- nova/tests/test_rpc_amqp.py | 88 -------------- nova/tests/test_rpc_carrot.py | 202 ++++++++++++++++++++++++++++++++ nova/tests/test_rpc_kombu.py | 266 ++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 481 insertions(+), 101 deletions(-) delete mode 100644 nova/tests/test_rpc_amqp.py create mode 100644 nova/tests/test_rpc_carrot.py create mode 100644 nova/tests/test_rpc_kombu.py diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 10b69c8b5..2a47ba87b 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,7 +23,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'carrot', + 'kombu', "The messaging module to use, defaults to carrot.") impl_table = {'kombu': 'nova.rpc.impl_kombu', @@ -42,7 +42,7 @@ def create_connection(new=True): def create_consumer(conn, topic, proxy, fanout=False): - return RPCIMPL.create_consumer(conn, topic, proxy, fanout) + RPCIMPL.create_consumer(conn, topic, proxy, fanout) def call(context, topic, msg): diff --git a/nova/rpc/common.py b/nova/rpc/common.py index 1d3065a83..b8c280630 100644 --- a/nova/rpc/common.py +++ b/nova/rpc/common.py @@ -1,8 +1,14 @@ from nova import exception +from nova import flags from nova import log as logging LOG = logging.getLogger('nova.rpc') +flags.DEFINE_integer('rpc_thread_pool_size', 1024, + 'Size of RPC thread pool') +flags.DEFINE_integer('rpc_conn_pool_size', 30, + 'Size of RPC connection pool') + class RemoteError(exception.Error): """Signifies that a remote class has raised an exception. 
diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index efff788a0..07af0a116 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -49,10 +49,6 @@ from nova.rpc.common import RemoteError, LOG eventlet.monkey_patch() FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') class Connection(carrot_connection.BrokerConnection): diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index bd83bc520..49bca1d81 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -40,11 +40,6 @@ eventlet.monkey_patch() FLAGS = flags.FLAGS -flags.DEFINE_integer('rpc_conn_pool_size', 30, - 'Size of RPC connection pool') -flags.DEFINE_integer('rpc_thread_pool_size', 1024, - 'Size of RPC thread pool') - class ConsumerBase(object): """Consumer base class.""" @@ -328,6 +323,9 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.Connection(**self.params) + if FLAGS.fake_rabbit: + # Kludge to speed up tests. + self.connection.transport.polling_interval = 0.0 self.consumer_num = itertools.count(1) try: @@ -422,13 +420,13 @@ class Connection(object): self.consume() except greenlet.GreenletExit: return - if not self.consumer_thread: + if self.consumer_thread is None: self.consumer_thread = eventlet.spawn(_consumer_thread) return self.consumer_thread def cancel_consumer_thread(self): """Cancel a consumer thread""" - if self.consumer_thread: + if self.consumer_thread is not None: self.consumer_thread.kill() try: self.consumer_thread.wait() diff --git a/nova/tests/test_rpc_amqp.py b/nova/tests/test_rpc_amqp.py deleted file mode 100644 index 2215a908b..000000000 --- a/nova/tests/test_rpc_amqp.py +++ /dev/null @@ -1,88 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright (c) 2010 Openstack, LLC. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Tests For RPC AMQP. -""" - -from nova import context -from nova import log as logging -from nova import rpc -from nova.rpc import amqp -from nova import test - - -LOG = logging.getLogger('nova.tests.rpc') - - -class RpcAMQPTestCase(test.TestCase): - def setUp(self): - super(RpcAMQPTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.consumer = rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) - self.consumer.attach_to_eventlet() - self.context = context.get_admin_context() - - def test_connectionpool_single(self): - """Test that ConnectionPool recycles a single connection.""" - conn1 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn1) - conn2 = amqp.ConnectionPool.get() - amqp.ConnectionPool.put(conn2) - self.assertEqual(conn1, conn2) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. 
- - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py new file mode 100644 index 000000000..cf84980ab --- /dev/null +++ b/nova/tests/test_rpc_carrot.py @@ -0,0 +1,202 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using queue +""" + +from nova import context +from nova import log as logging +from nova.rpc import impl_carrot as rpc +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcCarrotTestCase(test.TestCase): + def setUp(self): + super(RpcCarrotTestCase, self).setUp() + self.conn = rpc.create_connection(True) + self.receiver = TestReceiver() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(RpcCarrotTestCase, self).tearDown() + + def test_connectionpool_single(self): + """Test that ConnectionPool recycles a single connection.""" + conn1 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn1) + conn2 = rpc.ConnectionPool.get() + rpc.ConnectionPool.put(conn2) + self.assertEqual(conn1, conn2) + + def test_call_succeed(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = rpc.multicall(self.context, + 
'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. + + """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = rpc.create_connection(True) + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() + value = 42 + result = rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. + + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py new file mode 100644 index 000000000..457dfdeca --- /dev/null +++ b/nova/tests/test_rpc_kombu.py @@ -0,0 +1,266 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for remote procedure calls using queue +""" + +from nova import context +from nova import log as logging +from nova.rpc import impl_kombu as rpc +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class RpcKombuTestCase(test.TestCase): + def setUp(self): + super(RpcKombuTestCase, self).setUp() + self.conn = rpc.create_connection() + self.receiver = TestReceiver() + rpc.create_consumer(self.conn, + 'test', + self.receiver, + False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(RpcKombuTestCase, self).tearDown() + + def test_reusing_connection(self): + """Test that reusing a connection returns same one.""" + conn_context = rpc.create_connection(new=False) + conn1 = conn_context.connection + conn_context.close() + conn_context = rpc.create_connection(new=False) + conn2 = conn_context.connection + conn_context.close() + self.assertEqual(conn1, conn2) + + def test_topic_send_receive(self): + """Test sending to a topic exchange/queue""" + + conn = rpc.create_connection() + message = 'topic test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_topic_consumer('a_topic', _callback) + conn.topic_send('a_topic', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + def test_direct_send_receive(self): + """Test sending to a direct exchange/queue""" + conn = rpc.create_connection() + message = 'direct test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_direct_consumer('a_direct', _callback) + conn.direct_send('a_direct', message) + conn.consume(limit=1) + conn.close() + + self.assertEqual(self.received_message, message) + + @test.skip_test("kombu memory transport seems buggy with fanout queues " + "as this test passes when you use rabbit (fake_rabbit=False)") + def test_fanout_send_receive(self): + """Test sending to a fanout exchange and consuming from 2 queues""" + + conn = rpc.create_connection() + conn2 = rpc.create_connection() + message = 'fanout test message' + + self.received_message = None + + def _callback(message): + self.received_message = message + + conn.declare_fanout_consumer('a_fanout', _callback) + conn2.declare_fanout_consumer('a_fanout', _callback) + conn.fanout_send('a_fanout', message) + + conn.consume(limit=1) + conn.close() + self.assertEqual(self.received_message, message) + + self.received_message = None + conn2.consume(limit=1) + conn2.close() + self.assertEqual(self.received_message, message) + + def test_call_succeed(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = 
rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. + + """ + value = 42 + self.assertRaises(rpc.RemoteError, + rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown rpc.RemoteError") + except rpc.RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. + ret = rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = rpc.create_connection(True) + rpc.create_consumer(conn, + 'nested', + nested, + False) + conn.consume_in_thread() + value = 42 + result = rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. 
+ + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) -- cgit From 4c2e9ae35b22e7ef2e3fdd20ed72bac115510ada Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:23:31 -0700 Subject: carrot consumer thread fix --- nova/rpc/impl_carrot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 07af0a116..d0e6f8269 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -118,13 +118,13 @@ class Connection(carrot_connection.BrokerConnection): consumer_set.wait() except greenlet.GreenletExit: return - if not self._rpc_consumer_thread: + if self._rpc_consumer_thread is None: self._rpc_consumer_thread = eventlet.spawn(_consumer_thread) return self._rpc_consumer_thread def cancel_consumer_thread(self): """Cancel a consumer thread""" - if self._rpc_consumer_thread: + if self._rpc_consumer_thread is not None: self._rpc_consumer_thread.kill() try: self._rpc_consumer_thread.wait() -- cgit From 53f796e0cfcec9d5c56dca86ee3c185625917dca Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 19:27:49 -0700 Subject: remove unused rpc connections in test_cloud and test_adminapi --- nova/tests/test_adminapi.py | 2 -- nova/tests/test_cloud.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py index 06cc498ac..aaa633adc 100644 --- a/nova/tests/test_adminapi.py +++ b/nova/tests/test_adminapi.py @@ -38,8 +38,6 @@ class AdminApiTestCase(test.TestCase): super(AdminApiTestCase, self).setUp() self.flags(connection_type='fake') - self.conn = rpc.create_connection() - # set up our cloud self.api = admin.AdminController() diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 0793784f8..14ab64f33 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -51,8 +51,6 @@ class CloudTestCase(test.TestCase): self.flags(connection_type='fake', stub_network=True) - self.conn = rpc.create_connection() - # set up our cloud self.cloud = cloud.CloudController() -- cgit From 599467124e812eb8ae73eb7a9af3fea71ee25157 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Sun, 28 Aug 2011 23:39:43 -0700 Subject: fix for assertIn and assertNotIn use which was added in python 2.7. this makes things work on 2.6 still --- nova/test.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/nova/test.py b/nova/test.py index 88f1489e8..d1c1ad20e 100644 --- a/nova/test.py +++ b/nova/test.py @@ -277,3 +277,21 @@ class TestCase(unittest.TestCase): continue else: self.assertEqual(sub_value, super_value) + + def assertIn(self, a, b): + """Python < v2.7 compatibility. Assert 'a' in 'b'""" + try: + f = super(TestCase, self).assertIn + except AttributeError: + self.assertTrue(a in b) + else: + f(a, b) + + def assertNotIn(self, a, b): + """Python < v2.7 compatibility. 
Assert 'a' NOT in 'b'""" + try: + f = super(TestCase, self).assertNotIn + except AttributeError: + self.assertFalse(a in b) + else: + f(a, b) -- cgit From 25cd526a72a98f184ed57fc85e7be2997305ce31 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 00:12:30 -0700 Subject: pep8 fixes --- nova/flags.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nova/flags.py b/nova/flags.py index e09b4721a..774da4ab4 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -302,9 +302,12 @@ DEFINE_bool('rabbit_use_ssl', False, 'connect over SSL') DEFINE_string('rabbit_userid', 'guest', 'rabbit userid') DEFINE_string('rabbit_password', 'guest', 'rabbit password') DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') -DEFINE_integer('rabbit_retry_interval', 1, 'rabbit connection retry interval to start') -DEFINE_integer('rabbit_retry_backoff', 2, 'rabbit connection retry backoff in seconds') -DEFINE_integer('rabbit_max_retries', 0, 'maximum rabbit connection attempts (0=try forever)') +DEFINE_integer('rabbit_retry_interval', 1, + 'rabbit connection retry interval to start') +DEFINE_integer('rabbit_retry_backoff', 2, + 'rabbit connection retry backoff in seconds') +DEFINE_integer('rabbit_max_retries', 0, + 'maximum rabbit connection attempts (0=try forever)') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') DEFINE_boolean('rabbit_durable_queues', False, 'use durable queues') DEFINE_list('enabled_apis', ['ec2', 'osapi'], -- cgit From 392b2cae41ad35715940544a976edc0b9edadf9c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 00:50:17 -0700 Subject: removed broken assert for abstract_scheduler --- nova/scheduler/abstract_scheduler.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/scheduler/abstract_scheduler.py b/nova/scheduler/abstract_scheduler.py index e8c343a4b..7f17b642f 100644 --- a/nova/scheduler/abstract_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -180,7 +180,6 @@ class AbstractScheduler(driver.Scheduler): for zone_id, result in child_results: if not result: continue - assert isinstance(zone_id, int) for zone_rec in zones: if zone_rec['id'] != zone_id: -- cgit From 8bfa5e23e90279dfdbef3e38fca810ccca540513 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 01:13:08 -0700 Subject: support the extra optional arguments for msg to assertIn and assertNotIn --- nova/test.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/nova/test.py b/nova/test.py index d1c1ad20e..d759aef60 100644 --- a/nova/test.py +++ b/nova/test.py @@ -278,20 +278,20 @@ class TestCase(unittest.TestCase): else: self.assertEqual(sub_value, super_value) - def assertIn(self, a, b): + def assertIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. Assert 'a' in 'b'""" try: f = super(TestCase, self).assertIn except AttributeError: - self.assertTrue(a in b) + self.assertTrue(a in b, *args, **kwargs) else: - f(a, b) + f(a, b, *args, **kwargs) - def assertNotIn(self, a, b): + def assertNotIn(self, a, b, *args, **kwargs): """Python < v2.7 compatibility. Assert 'a' NOT in 'b'""" try: f = super(TestCase, self).assertNotIn except AttributeError: - self.assertFalse(a in b) + self.assertFalse(a in b, *args, **kwargs) else: - f(a, b) + f(a, b, *args, **kwargs) -- cgit From 39ca3df042bd3fa9a8ae2bf97d9383be7360d900 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 29 Aug 2011 09:45:00 -0400 Subject: Increased migration number. 
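
The script itself is unchanged; it is renamed from 043 to 044, presumably because another migration merged first and claimed the 043 slot. sqlalchemy-migrate applies scripts in strictly ascending version order and records the current version in the migrate_version table, so two scripts cannot share a number.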
--- .../versions/043_update_instance_states.py | 138 --------------------- .../versions/044_update_instance_states.py | 138 +++++++++++++++++++++ 2 files changed, 138 insertions(+), 138 deletions(-) delete mode 100644 nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py deleted file mode 100644 index e58ae5362..000000000 --- a/nova/db/sqlalchemy/migrate_repo/versions/043_update_instance_states.py +++ /dev/null @@ -1,138 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sqlalchemy -from sqlalchemy import MetaData, Table, Column, String - -from nova.compute import task_states -from nova.compute import vm_states - - -meta = MetaData() - - -c_task_state = Column('task_state', - String(length=255, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False), - nullable=True) - - -_upgrade_translations = { - "stopping": { - "state_description": vm_states.ACTIVE, - "task_state": task_states.STOPPING, - }, - "stopped": { - "state_description": vm_states.STOPPED, - "task_state": None, - }, - "terminated": { - "state_description": vm_states.DELETED, - "task_state": None, - }, - "terminating": { - "state_description": vm_states.ACTIVE, - "task_state": task_states.DELETING, - }, - "running": { - "state_description": vm_states.ACTIVE, - "task_state": None, - }, - "scheduling": { - "state_description": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, - "migrating": { - "state_description": vm_states.MIGRATING, - "task_state": None, - }, - "pending": { - "state_description": vm_states.BUILDING, - "task_state": task_states.SCHEDULING, - }, -} - - -_downgrade_translations = { - vm_states.ACTIVE: { - None: "running", - task_states.DELETING: "terminating", - task_states.STOPPING: "stopping", - }, - vm_states.BUILDING: { - None: "pending", - task_states.SCHEDULING: "scheduling", - }, - vm_states.STOPPED: { - None: "stopped", - }, - vm_states.REBUILDING: { - None: "pending", - }, - vm_states.DELETED: { - None: "terminated", - }, - vm_states.MIGRATING: { - None: "migrating", - }, -} - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, autoload=True, - autoload_with=migrate_engine) - - c_state = instance_table.c.state - c_state.alter(name='power_state') - - c_vm_state = instance_table.c.state_description - c_vm_state.alter(name='vm_state') - - instance_table.create_column(c_task_state) - - for old_state, values in _upgrade_translations.iteritems(): - instance_table.update().\ - values(**values).\ - where(c_vm_state == old_state).\ - execute() - - -def downgrade(migrate_engine): - meta.bind = migrate_engine - - instance_table = Table('instances', meta, 
autoload=True, - autoload_with=migrate_engine) - - c_task_state = instance_table.c.task_state - - c_state = instance_table.c.power_state - c_state.alter(name='state') - - c_vm_state = instance_table.c.vm_state - c_vm_state.alter(name='state_description') - - for old_vm_state, old_task_states in _downgrade_translations.iteritems(): - for old_task_state, new_state_desc in old_task_states.iteritems(): - instance_table.update().\ - where(c_task_state == old_task_state).\ - where(c_vm_state == old_vm_state).\ - values(vm_state=new_state_desc).\ - execute() - - instance_table.drop_column('task_state') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py new file mode 100644 index 000000000..e58ae5362 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/044_update_instance_states.py @@ -0,0 +1,138 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import sqlalchemy +from sqlalchemy import MetaData, Table, Column, String + +from nova.compute import task_states +from nova.compute import vm_states + + +meta = MetaData() + + +c_task_state = Column('task_state', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +_upgrade_translations = { + "stopping": { + "state_description": vm_states.ACTIVE, + "task_state": task_states.STOPPING, + }, + "stopped": { + "state_description": vm_states.STOPPED, + "task_state": None, + }, + "terminated": { + "state_description": vm_states.DELETED, + "task_state": None, + }, + "terminating": { + "state_description": vm_states.ACTIVE, + "task_state": task_states.DELETING, + }, + "running": { + "state_description": vm_states.ACTIVE, + "task_state": None, + }, + "scheduling": { + "state_description": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, + "migrating": { + "state_description": vm_states.MIGRATING, + "task_state": None, + }, + "pending": { + "state_description": vm_states.BUILDING, + "task_state": task_states.SCHEDULING, + }, +} + + +_downgrade_translations = { + vm_states.ACTIVE: { + None: "running", + task_states.DELETING: "terminating", + task_states.STOPPING: "stopping", + }, + vm_states.BUILDING: { + None: "pending", + task_states.SCHEDULING: "scheduling", + }, + vm_states.STOPPED: { + None: "stopped", + }, + vm_states.REBUILDING: { + None: "pending", + }, + vm_states.DELETED: { + None: "terminated", + }, + vm_states.MIGRATING: { + None: "migrating", + }, +} + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_state = instance_table.c.state + c_state.alter(name='power_state') + + c_vm_state = instance_table.c.state_description + c_vm_state.alter(name='vm_state') + + instance_table.create_column(c_task_state) + + for old_state, values in 
_upgrade_translations.iteritems(): + instance_table.update().\ + values(**values).\ + where(c_vm_state == old_state).\ + execute() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instance_table = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + c_task_state = instance_table.c.task_state + + c_state = instance_table.c.power_state + c_state.alter(name='state') + + c_vm_state = instance_table.c.vm_state + c_vm_state.alter(name='state_description') + + for old_vm_state, old_task_states in _downgrade_translations.iteritems(): + for old_task_state, new_state_desc in old_task_states.iteritems(): + instance_table.update().\ + where(c_task_state == old_task_state).\ + where(c_vm_state == old_vm_state).\ + values(vm_state=new_state_desc).\ + execute() + + instance_table.drop_column('task_state') -- cgit From 0972d9188b0b73fa357f75896ab3bebda9a2a9de Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Mon, 29 Aug 2011 10:13:39 -0400 Subject: Removed test_parallel_builds --- nova/tests/test_xenapi.py | 37 ------------------------------------- 1 file changed, 37 deletions(-) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 2f0559366..45dad3516 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -16,7 +16,6 @@ """Test suite for XenAPI.""" -import eventlet import functools import json import os @@ -203,42 +202,6 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext(self.user_id, self.project_id) self.conn = xenapi_conn.get_connection(False) - def test_parallel_builds(self): - stubs.stubout_loopingcall_delay(self.stubs) - - def _do_build(id, proj, user, *args): - values = { - 'id': id, - 'project_id': proj, - 'user_id': user, - 'image_ref': 1, - 'kernel_id': 2, - 'ramdisk_id': 3, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, - {'broadcast': '192.168.0.255', - 'dns': ['192.168.0.1'], - 'gateway': '192.168.0.1', - 'gateway6': 'dead:beef::1', - 'ip6s': [{'enabled': '1', - 'ip': 'dead:beef::dcad:beff:feef:0', - 'netmask': '64'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.0.100', - 'netmask': '255.255.255.0'}], - 'label': 'fake', - 'mac': 'DE:AD:BE:EF:00:00', - 'rxtx_cap': 3})] - instance = db.instance_create(self.context, values) - self.conn.spawn(self.context, instance, network_info) - - gt1 = eventlet.spawn(_do_build, 1, self.project_id, self.user_id) - gt2 = eventlet.spawn(_do_build, 2, self.project_id, self.user_id) - gt1.wait() - gt2.wait() - def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEquals(instances, []) -- cgit -- cgit From 024b76a8df5c96d37dea0a05f66dfe4628a64a28 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Mon, 29 Aug 2011 10:27:25 -0700 Subject: more logging info to help identify bad payloads --- nova/notifier/api.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/notifier/api.py b/nova/notifier/api.py index 6ef4a050e..043838536 100644 --- a/nova/notifier/api.py +++ b/nova/notifier/api.py @@ -122,4 +122,5 @@ def notify(publisher_id, event_type, priority, payload): driver.notify(msg) except Exception, e: LOG.exception(_("Problem '%(e)s' attempting to " - "send to notification system." % locals())) + "send to notification system. 
Payload=%(payload)s" % + locals())) -- cgit From ee15f2a58217d522e23d811db4958e2e9b2338d6 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 14:36:12 -0700 Subject: ditched rpc.create_consumer(conn) interface... instead you now do conn.create_consumer(.. --- nova/rpc/__init__.py | 6 +---- nova/rpc/impl_carrot.py | 29 +++++++++++------------ nova/rpc/impl_kombu.py | 55 +++++++++++++++++++++---------------------- nova/service.py | 8 +++---- nova/tests/test_rpc.py | 10 ++------ nova/tests/test_rpc_carrot.py | 10 ++------ nova/tests/test_rpc_kombu.py | 10 ++------ nova/tests/test_test.py | 3 +-- 8 files changed, 52 insertions(+), 79 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 2a47ba87b..fe50fb476 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -24,7 +24,7 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', 'kombu', - "The messaging module to use, defaults to carrot.") + "The messaging module to use, defaults to kombu.") impl_table = {'kombu': 'nova.rpc.impl_kombu', 'amqp': 'nova.rpc.impl_kombu', @@ -41,10 +41,6 @@ def create_connection(new=True): return RPCIMPL.create_connection(new=new) -def create_consumer(conn, topic, proxy, fanout=False): - RPCIMPL.create_consumer(conn, topic, proxy, fanout) - - def call(context, topic, msg): return RPCIMPL.call(context, topic, msg) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index d0e6f8269..6d504aaec 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -132,6 +132,20 @@ class Connection(carrot_connection.BrokerConnection): pass self._rpc_consumer_thread = None + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls methods in the proxy""" + if fanout: + consumer = FanoutAdapterConsumer( + connection=self, + topic=topic, + proxy=proxy) + else: + consumer = TopicAdapterConsumer( + connection=self, + topic=topic, + proxy=proxy) + self._rpc_consumers.append(consumer) + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -187,7 +201,6 @@ class Consumer(messaging.Consumer): LOG.error(_('Unable to connect to AMQP server ' 'after %(tries)d tries. 
Shutting down.') % locals()) sys.exit(1) - connection._rpc_consumers.append(self) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): """Wraps the parent fetch with some logic for failed connection.""" @@ -568,20 +581,6 @@ def create_connection(new=True): return Connection.instance(new=new) -def create_consumer(conn, topic, proxy, fanout=False): - """Create a consumer that calls methods in the proxy""" - if fanout: - return FanoutAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - else: - return TopicAdapterConsumer( - connection=conn, - topic=topic, - proxy=proxy) - - def call(context, topic, msg): """Sends a message on a topic and wait for a response.""" rv = multicall(context, topic, msg) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 49bca1d81..83ee1b122 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -404,26 +404,6 @@ class Connection(object): '%s' % str(e))) self.reconnect() - def consume(self, limit=None): - """Consume from all queues/consumers""" - it = self.iterconsume(limit=limit) - while True: - try: - it.next() - except StopIteration: - return - - def consume_in_thread(self): - """Consumer from all queues/consumers in a greenthread""" - def _consumer_thread(): - try: - self.consume() - except greenlet.GreenletExit: - return - if self.consumer_thread is None: - self.consumer_thread = eventlet.spawn(_consumer_thread) - return self.consumer_thread - def cancel_consumer_thread(self): """Cancel a consumer thread""" if self.consumer_thread is not None: @@ -478,6 +458,33 @@ class Connection(object): """Send a 'fanout' message""" self.publisher_send(FanoutPublisher, topic, msg) + def consume(self, limit=None): + """Consume from all queues/consumers""" + it = self.iterconsume(limit=limit) + while True: + try: + it.next() + except StopIteration: + return + + def consume_in_thread(self): + """Consumer from all queues/consumers in a greenthread""" + def _consumer_thread(): + try: + self.consume() + except greenlet.GreenletExit: + return + if self.consumer_thread is None: + self.consumer_thread = eventlet.spawn(_consumer_thread) + return self.consumer_thread + + def create_consumer(self, topic, proxy, fanout=False): + """Create a consumer that calls a method in a proxy object""" + if fanout: + self.declare_fanout_consumer(topic, ProxyCallback(proxy)) + else: + self.declare_topic_consumer(topic, ProxyCallback(proxy)) + class Pool(pools.Pool): """Class that implements a Pool of Connections.""" @@ -678,14 +685,6 @@ def create_connection(new=True): return ConnectionContext(pooled=not new) -def create_consumer(conn, topic, proxy, fanout=False): - """Create a consumer that calls a method in a proxy object""" - if fanout: - conn.declare_fanout_consumer(topic, ProxyCallback(proxy)) - else: - conn.declare_topic_consumer(topic, ProxyCallback(proxy)) - - def multicall(context, topic, msg): """Make a call that returns multiple times.""" # Can't use 'with' for multicall, as it returns an iterator diff --git a/nova/service.py b/nova/service.py index ab7925eb3..247eb4fb1 100644 --- a/nova/service.py +++ b/nova/service.py @@ -153,14 +153,12 @@ class Service(object): self.topic) # Share this same connection for these Consumers - rpc.create_consumer(self.conn, self.topic, self, - fanout=False) + self.conn.create_consumer(self.topic, self, fanout=False) node_topic = '%s.%s' % (self.topic, self.host) - rpc.create_consumer(self.conn, node_topic, self, - fanout=False) + self.conn.create_consumer(node_topic, self, fanout=False) - 
rpc.create_consumer(self.conn, self.topic, self, fanout=True) + self.conn.create_consumer(self.topic, self, fanout=True) # Consume from all consumers in a thread self.conn.consume_in_thread() diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index 2b9922491..ba91ea3b2 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -33,10 +33,7 @@ class RpcTestCase(test.TestCase): super(RpcTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -143,10 +140,7 @@ class RpcTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py index cf84980ab..ff704ecf8 100644 --- a/nova/tests/test_rpc_carrot.py +++ b/nova/tests/test_rpc_carrot.py @@ -33,10 +33,7 @@ class RpcCarrotTestCase(test.TestCase): super(RpcCarrotTestCase, self).setUp() self.conn = rpc.create_connection(True) self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -151,10 +148,7 @@ class RpcCarrotTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py index 457dfdeca..7db88ecd0 100644 --- a/nova/tests/test_rpc_kombu.py +++ b/nova/tests/test_rpc_kombu.py @@ -33,10 +33,7 @@ class RpcKombuTestCase(test.TestCase): super(RpcKombuTestCase, self).setUp() self.conn = rpc.create_connection() self.receiver = TestReceiver() - rpc.create_consumer(self.conn, - 'test', - self.receiver, - False) + self.conn.create_consumer('test', self.receiver, False) self.conn.consume_in_thread() self.context = context.get_admin_context() @@ -215,10 +212,7 @@ class RpcKombuTestCase(test.TestCase): nested = Nested() conn = rpc.create_connection(True) - rpc.create_consumer(conn, - 'nested', - nested, - False) + conn.create_consumer('nested', nested, False) conn.consume_in_thread() value = 42 result = rpc.call(self.context, diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py index 6075abbb0..3482ff6a0 100644 --- a/nova/tests/test_test.py +++ b/nova/tests/test_test.py @@ -40,6 +40,5 @@ class IsolationTestCase(test.TestCase): connection = rpc.create_connection(new=True) proxy = NeverCalled() - rpc.create_consumer(connection, 'compute', - proxy, fanout=False) + connection.create_consumer('compute', proxy, fanout=False) connection.consume_in_thread() -- cgit From a16efa7b94a15040657b961b0fd29a4d2720ef21 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Mon, 29 Aug 2011 14:54:20 -0700 Subject: created nova/tests/test_rpc_common.py which contains a rpc test base class so we can share tests between the rpc implementations --- nova/tests/test_rpc.py | 157 +---------------------------------- nova/tests/test_rpc_carrot.py | 169 ++----------------------------------- nova/tests/test_rpc_common.py | 188 
++++++++++++++++++++++++++++++++++++++++++ nova/tests/test_rpc_kombu.py | 172 +++----------------------------------- 4 files changed, 211 insertions(+), 475 deletions(-) create mode 100644 nova/tests/test_rpc_common.py diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py index ba91ea3b2..6b4454747 100644 --- a/nova/tests/test_rpc.py +++ b/nova/tests/test_rpc.py @@ -22,167 +22,16 @@ Unit Tests for remote procedure calls using queue from nova import context from nova import log as logging from nova import rpc -from nova import test +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcTestCase(test.TestCase): +class RpcTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = rpc super(RpcTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcTestCase, self).tearDown() - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. 
- - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_carrot.py b/nova/tests/test_rpc_carrot.py index ff704ecf8..57cdebf4f 100644 --- a/nova/tests/test_rpc_carrot.py +++ b/nova/tests/test_rpc_carrot.py @@ -16,181 +16,30 @@ # License for the specific language governing permissions and limitations # under the License. 
""" -Unit Tests for remote procedure calls using queue +Unit Tests for remote procedure calls using carrot """ from nova import context from nova import log as logging -from nova.rpc import impl_carrot as rpc -from nova import test +from nova.rpc import impl_carrot +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcCarrotTestCase(test.TestCase): +class RpcCarrotTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = impl_carrot super(RpcCarrotTestCase, self).setUp() - self.conn = rpc.create_connection(True) - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcCarrotTestCase, self).tearDown() def test_connectionpool_single(self): """Test that ConnectionPool recycles a single connection.""" - conn1 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn1) - conn2 = rpc.ConnectionPool.get() - rpc.ConnectionPool.put(conn2) + conn1 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn1) + conn2 = self.rpc.ConnectionPool.get() + self.rpc.ConnectionPool.put(conn2) self.assertEqual(conn1, conn2) - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. 
- - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. - - Uses static methods because we aren't actually storing any state. - - """ - - @staticmethod - def echo(context, value): - """Simply returns whatever value is sent in.""" - LOG.debug(_("Received %s"), value) - return value - - @staticmethod - def context(context, value): - """Returns dictionary version of context.""" - LOG.debug(_("Received %s"), context) - return context.to_dict() - - @staticmethod - def echo_three_times(context, value): - context.reply(value) - context.reply(value + 1) - context.reply(value + 2) - - @staticmethod - def echo_three_times_yield(context, value): - yield value - yield value + 1 - yield value + 2 - - @staticmethod - def fail(context, value): - """Raises an exception with the value sent in.""" - raise Exception(value) diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py new file mode 100644 index 000000000..b922be1df --- /dev/null +++ b/nova/tests/test_rpc_common.py @@ -0,0 +1,188 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+""" +Unit Tests for remote procedure calls shared between all implementations +""" + +from nova import context +from nova import log as logging +from nova.rpc.common import RemoteError +from nova import test + + +LOG = logging.getLogger('nova.tests.rpc') + + +class _BaseRpcTestCase(test.TestCase): + def setUp(self): + super(_BaseRpcTestCase, self).setUp() + self.conn = self.rpc.create_connection(True) + self.receiver = TestReceiver() + self.conn.create_consumer('test', self.receiver, False) + self.conn.consume_in_thread() + self.context = context.get_admin_context() + + def tearDown(self): + self.conn.close() + super(_BaseRpcTestCase, self).tearDown() + + def test_call_succeed(self): + value = 42 + result = self.rpc.call(self.context, 'test', {"method": "echo", + "args": {"value": value}}) + self.assertEqual(value, result) + + def test_call_succeed_despite_multiple_returns(self): + value = 42 + result = self.rpc.call(self.context, 'test', {"method": "echo_three_times", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_call_succeed_despite_multiple_returns_yield(self): + value = 42 + result = self.rpc.call(self.context, 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + self.assertEqual(value + 2, result) + + def test_multicall_succeed_once(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo", + "args": {"value": value}}) + for i, x in enumerate(result): + if i > 0: + self.fail('should only receive one response') + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_multicall_succeed_three_times_yield(self): + value = 42 + result = self.rpc.multicall(self.context, + 'test', + {"method": "echo_three_times_yield", + "args": {"value": value}}) + for i, x in enumerate(result): + self.assertEqual(value + i, x) + + def test_context_passed(self): + """Makes sure a context is passed through rpc call.""" + value = 42 + result = self.rpc.call(self.context, + 'test', {"method": "context", + "args": {"value": value}}) + self.assertEqual(self.context.to_dict(), result) + + def test_call_exception(self): + """Test that exception gets passed back properly. + + rpc.call returns a RemoteError object. The value of the + exception is converted to a string, so we convert it back + to an int in the test. + + """ + value = 42 + self.assertRaises(RemoteError, + self.rpc.call, + self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + try: + self.rpc.call(self.context, + 'test', + {"method": "fail", + "args": {"value": value}}) + self.fail("should have thrown RemoteError") + except RemoteError as exc: + self.assertEqual(int(exc.value), value) + + def test_nested_calls(self): + """Test that we can do an rpc.call inside another call.""" + class Nested(object): + @staticmethod + def echo(context, queue, value): + """Calls echo in the passed queue""" + LOG.debug(_("Nested received %(queue)s, %(value)s") + % locals()) + # TODO: so, it will replay the context and use the same REQID? + # that's bizarre. 
+ ret = self.rpc.call(context, + queue, + {"method": "echo", + "args": {"value": value}}) + LOG.debug(_("Nested return %s"), ret) + return value + + nested = Nested() + conn = self.rpc.create_connection(True) + conn.create_consumer('nested', nested, False) + conn.consume_in_thread() + value = 42 + result = self.rpc.call(self.context, + 'nested', {"method": "echo", + "args": {"queue": "test", + "value": value}}) + conn.close() + self.assertEqual(value, result) + + +class TestReceiver(object): + """Simple Proxy class so the consumer has methods to call. + + Uses static methods because we aren't actually storing any state. + + """ + + @staticmethod + def echo(context, value): + """Simply returns whatever value is sent in.""" + LOG.debug(_("Received %s"), value) + return value + + @staticmethod + def context(context, value): + """Returns dictionary version of context.""" + LOG.debug(_("Received %s"), context) + return context.to_dict() + + @staticmethod + def echo_three_times(context, value): + context.reply(value) + context.reply(value + 1) + context.reply(value + 2) + + @staticmethod + def echo_three_times_yield(context, value): + yield value + yield value + 1 + yield value + 2 + + @staticmethod + def fail(context, value): + """Raises an exception with the value sent in.""" + raise Exception(value) diff --git a/nova/tests/test_rpc_kombu.py b/nova/tests/test_rpc_kombu.py index 7db88ecd0..101ed14af 100644 --- a/nova/tests/test_rpc_kombu.py +++ b/nova/tests/test_rpc_kombu.py @@ -16,37 +16,33 @@ # License for the specific language governing permissions and limitations # under the License. """ -Unit Tests for remote procedure calls using queue +Unit Tests for remote procedure calls using kombu """ from nova import context from nova import log as logging -from nova.rpc import impl_kombu as rpc from nova import test +from nova.rpc import impl_kombu +from nova.tests import test_rpc_common LOG = logging.getLogger('nova.tests.rpc') -class RpcKombuTestCase(test.TestCase): +class RpcKombuTestCase(test_rpc_common._BaseRpcTestCase): def setUp(self): + self.rpc = impl_kombu super(RpcKombuTestCase, self).setUp() - self.conn = rpc.create_connection() - self.receiver = TestReceiver() - self.conn.create_consumer('test', self.receiver, False) - self.conn.consume_in_thread() - self.context = context.get_admin_context() def tearDown(self): - self.conn.close() super(RpcKombuTestCase, self).tearDown() def test_reusing_connection(self): """Test that reusing a connection returns same one.""" - conn_context = rpc.create_connection(new=False) + conn_context = self.rpc.create_connection(new=False) conn1 = conn_context.connection conn_context.close() - conn_context = rpc.create_connection(new=False) + conn_context = self.rpc.create_connection(new=False) conn2 = conn_context.connection conn_context.close() self.assertEqual(conn1, conn2) @@ -54,7 +50,7 @@ class RpcKombuTestCase(test.TestCase): def test_topic_send_receive(self): """Test sending to a topic exchange/queue""" - conn = rpc.create_connection() + conn = self.rpc.create_connection() message = 'topic test message' self.received_message = None @@ -71,7 +67,7 @@ class RpcKombuTestCase(test.TestCase): def test_direct_send_receive(self): """Test sending to a direct exchange/queue""" - conn = rpc.create_connection() + conn = self.rpc.create_connection() message = 'direct test message' self.received_message = None @@ -91,8 +87,8 @@ class RpcKombuTestCase(test.TestCase): def test_fanout_send_receive(self): """Test sending to a fanout exchange and consuming from 2 
queues""" - conn = rpc.create_connection() - conn2 = rpc.create_connection() + conn = self.rpc.create_connection() + conn2 = self.rpc.create_connection() message = 'fanout test message' self.received_message = None @@ -112,149 +108,3 @@ class RpcKombuTestCase(test.TestCase): conn2.consume(limit=1) conn2.close() self.assertEqual(self.received_message, message) - - def test_call_succeed(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo", - "args": {"value": value}}) - self.assertEqual(value, result) - - def test_call_succeed_despite_multiple_returns(self): - value = 42 - result = rpc.call(self.context, 'test', {"method": "echo_three_times", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_call_succeed_despite_multiple_returns_yield(self): - value = 42 - result = rpc.call(self.context, 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - self.assertEqual(value + 2, result) - - def test_multicall_succeed_once(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo", - "args": {"value": value}}) - for i, x in enumerate(result): - if i > 0: - self.fail('should only receive one response') - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_multicall_succeed_three_times_yield(self): - value = 42 - result = rpc.multicall(self.context, - 'test', - {"method": "echo_three_times_yield", - "args": {"value": value}}) - for i, x in enumerate(result): - self.assertEqual(value + i, x) - - def test_context_passed(self): - """Makes sure a context is passed through rpc call.""" - value = 42 - result = rpc.call(self.context, - 'test', {"method": "context", - "args": {"value": value}}) - self.assertEqual(self.context.to_dict(), result) - - def test_call_exception(self): - """Test that exception gets passed back properly. - - rpc.call returns a RemoteError object. The value of the - exception is converted to a string, so we convert it back - to an int in the test. - - """ - value = 42 - self.assertRaises(rpc.RemoteError, - rpc.call, - self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - try: - rpc.call(self.context, - 'test', - {"method": "fail", - "args": {"value": value}}) - self.fail("should have thrown rpc.RemoteError") - except rpc.RemoteError as exc: - self.assertEqual(int(exc.value), value) - - def test_nested_calls(self): - """Test that we can do an rpc.call inside another call.""" - class Nested(object): - @staticmethod - def echo(context, queue, value): - """Calls echo in the passed queue""" - LOG.debug(_("Nested received %(queue)s, %(value)s") - % locals()) - # TODO: so, it will replay the context and use the same REQID? - # that's bizarre. - ret = rpc.call(context, - queue, - {"method": "echo", - "args": {"value": value}}) - LOG.debug(_("Nested return %s"), ret) - return value - - nested = Nested() - conn = rpc.create_connection(True) - conn.create_consumer('nested', nested, False) - conn.consume_in_thread() - value = 42 - result = rpc.call(self.context, - 'nested', {"method": "echo", - "args": {"queue": "test", - "value": value}}) - conn.close() - self.assertEqual(value, result) - - -class TestReceiver(object): - """Simple Proxy class so the consumer has methods to call. 
-
-    Uses static methods because we aren't actually storing any state.
-
-    """
-
-    @staticmethod
-    def echo(context, value):
-        """Simply returns whatever value is sent in."""
-        LOG.debug(_("Received %s"), value)
-        return value
-
-    @staticmethod
-    def context(context, value):
-        """Returns dictionary version of context."""
-        LOG.debug(_("Received %s"), context)
-        return context.to_dict()
-
-    @staticmethod
-    def echo_three_times(context, value):
-        context.reply(value)
-        context.reply(value + 1)
-        context.reply(value + 2)
-
-    @staticmethod
-    def echo_three_times_yield(context, value):
-        yield value
-        yield value + 1
-        yield value + 2
-
-    @staticmethod
-    def fail(context, value):
-        """Raises an exception with the value sent in."""
-        raise Exception(value)
-- cgit

From db27d93bc195598a5dd0e7a35480281447cf4ea1 Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:08:32 -0700
Subject: doc string cleanup

---
 nova/rpc/impl_kombu.py | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py
index 83ee1b122..ffd6447da 100644
--- a/nova/rpc/impl_kombu.py
+++ b/nova/rpc/impl_kombu.py
@@ -290,7 +290,7 @@ class FanoutPublisher(Publisher):
 
 
 class Connection(object):
-    """Connection instance object."""
+    """Connection object."""
 
     def __init__(self):
         self.consumers = []
@@ -503,7 +503,18 @@ ConnectionPool = Pool(
 
 
 class ConnectionContext(object):
+    """The class that is actually returned to the caller of
+    create_connection().  This is essentially a wrapper around
+    Connection that supports 'with' and can return a new Connection or
+    one from a pool.  It will also catch when an instance of this class
+    is to be deleted so that we can return Connections to the pool on
+    exceptions and so forth without making the caller be responsible for
+    catching all exceptions and making sure to return a connection to
+    the pool.
+    """
+
     def __init__(self, pooled=True):
+        """Create a new connection, or get one from the pool"""
         self.connection = None
         if pooled:
             self.connection = ConnectionPool.get()
@@ -512,9 +523,13 @@ class ConnectionContext(object):
         self.pooled = pooled
 
     def __enter__(self):
+        """with ConnectionContext() should return self"""
         return self
 
     def _done(self):
+        """If the connection came from a pool, clean it up and put it back.
+        If it did not come from a pool, close it.
+        """
         if self.connection:
             if self.pooled:
                 # Reset the connection so it's ready for the next caller
@@ -533,19 +548,19 @@ class ConnectionContext(object):
             self.connection = None
 
     def __exit__(self, t, v, tb):
-        """end if 'with' statement.  We're done here."""
+        """end of 'with' statement.  We're done here."""
         self._done()
 
     def __del__(self):
-        """Put Connection back into the pool if this ConnectionContext
-        is being deleted
-        """
+        """Caller is done with this connection.  Make sure we cleaned up."""
        self._done()

     def close(self):
+        """Caller is done with this connection."""
         self._done()

     def __getattr__(self, key):
+        """Proxy all other calls to the Connection instance"""
         if self.connection:
             return getattr(self.connection, key)
         else:
@@ -637,6 +652,7 @@ def _pack_context(msg, context):
 
 
 class RpcContext(context.RequestContext):
+    """Context that supports replying to a rpc.call"""
     def __init__(self, *args, **kwargs):
         msg_id = kwargs.pop('msg_id', None)
         self.msg_id = msg_id
@@ -656,7 +672,7 @@ class MulticallWaiter(object):
 
     def done(self):
         self._done = True
-        self._connection = None
+        self._connection.close()
 
     def __call__(self, data):
         """The consume() callback will call this.  Store the result."""
@@ -666,6 +682,7 @@
         self._result = data['result']
 
     def __iter__(self):
+        """Return a result until we get a 'None' response from consumer"""
         if self._done:
             raise StopIteration
         while True:
-- cgit

From 468ed475207b023cfa3eada48338d34375f55be2 Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:15:58 -0700
Subject: fix ajax console proxy for new create_consumer method

---
 bin/nova-ajax-console-proxy | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index b3205ec56..23fb42fb5 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -114,8 +114,7 @@ class AjaxConsoleProxy(object):
                     {'args': kwargs, 'last_activity': time.time()}
 
         self.conn = rpc.create_connection(new=True)
-        rpc.create_consumer(
-            self.conn,
+        self.conn.create_consumer(
             FLAGS.ajax_console_proxy_topic,
             TopicProxy)
-- cgit

From 345afb31678a1f94fcca6d63a4ab506e537c3a9c Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:25:54 -0700
Subject: pep8 fix for test_rpc_common.py

---
 nova/tests/test_rpc_common.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/nova/tests/test_rpc_common.py b/nova/tests/test_rpc_common.py
index b922be1df..4ab4e8a0e 100644
--- a/nova/tests/test_rpc_common.py
+++ b/nova/tests/test_rpc_common.py
@@ -49,8 +49,9 @@ class _BaseRpcTestCase(test.TestCase):
 
     def test_call_succeed_despite_multiple_returns(self):
         value = 42
-        result = self.rpc.call(self.context, 'test', {"method": "echo_three_times",
-                                             "args": {"value": value}})
+        result = self.rpc.call(self.context, 'test',
+                               {"method": "echo_three_times",
+                                "args": {"value": value}})
         self.assertEqual(value + 2, result)
 
     def test_call_succeed_despite_multiple_returns_yield(self):
-- cgit

From 8965e567ce25e6b9718f1bca60b35f586bab985f Mon Sep 17 00:00:00 2001
From: Chris Behrens
Date: Mon, 29 Aug 2011 15:26:26 -0700
Subject: remove unneeded connection= in carrot Consumer init

---
 nova/rpc/impl_carrot.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py
index 6d504aaec..1d23c1853 100644
--- a/nova/rpc/impl_carrot.py
+++ b/nova/rpc/impl_carrot.py
@@ -171,7 +171,6 @@ class Consumer(messaging.Consumer):
     """
 
     def __init__(self, *args, **kwargs):
-        connection = kwargs.get('connection')
         max_retries = FLAGS.rabbit_max_retries
         sleep_time = FLAGS.rabbit_retry_interval
         tries = 0
-- cgit

From ebd47b7cb397f33c1e7c9f32dd5b77f7fd5d6642 Mon Sep 17 00:00:00 2001
From: Dan Prince
Date: Mon, 29 Aug 2011 22:27:28 -0400
Subject: Update RequestContext so that it correctly sets self.is_admin from
 the roles array. Additionally add a bit of code to ignore case as well.

Resolves issues when accessing admin APIs with Keystone.
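(Editor's aside, not part of the patch: a minimal standalone sketch of the
behaviour introduced below, assuming only that roles is a list of role-name
strings as in nova.context.RequestContext.)

    def is_admin_from_roles(roles):
        # Case-insensitive membership test, mirroring the context.py hunk.
        return 'admin' in [role.lower() for role in roles]

    assert is_admin_from_roles(['Admin', 'weasel'])
    assert not is_admin_from_roles(['member'])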
---
 nova/context.py | 2 +-
 nova/tests/test_context.py | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)
 create mode 100644 nova/tests/test_context.py

diff --git a/nova/context.py b/nova/context.py
index b917a1d81..5c22641a0 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -38,7 +38,7 @@ class RequestContext(object):
         self.roles = roles or []
         self.is_admin = is_admin
         if self.is_admin is None:
-            self.admin = 'admin' in self.roles
+            self.is_admin = 'admin' in [x.lower() for x in self.roles]
         self.read_deleted = read_deleted
         self.remote_address = remote_address
         if not timestamp:
diff --git a/nova/tests/test_context.py b/nova/tests/test_context.py
new file mode 100644
index 000000000..b2507fa59
--- /dev/null
+++ b/nova/tests/test_context.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import test
+
+
+class ContextTestCase(test.TestCase):
+
+    def test_request_context_sets_is_admin(self):
+        ctxt = context.RequestContext('111',
+                                      '222',
+                                      roles=['admin', 'weasel'])
+        self.assertEquals(ctxt.is_admin, True)
+
+    def test_request_context_sets_is_admin_upcase(self):
+        ctxt = context.RequestContext('111',
+                                      '222',
+                                      roles=['Admin', 'weasel'])
+        self.assertEquals(ctxt.is_admin, True)
-- cgit

From a635027ddbeb73dfad8bbf2890f67cb1ed7511bf Mon Sep 17 00:00:00 2001
From: Anthony Young
Date: Tue, 30 Aug 2011 00:03:39 -0700
Subject: disassociate floating ips before re-associating, and prevent
 re-association of already associated floating ips in manager

---
 nova/exception.py | 4 ++
 nova/network/api.py | 6 ++
 nova/network/manager.py | 7 +++
 .../api/openstack/contrib/test_floating_ips.py | 68 ++++++++++++++++++++--
 nova/tests/test_network.py | 16 +++++
 5 files changed, 96 insertions(+), 5 deletions(-)

diff --git a/nova/exception.py b/nova/exception.py
index 32981f4d5..b54981963 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -533,6 +533,10 @@ class NoMoreFloatingIps(FloatingIpNotFound):
     message = _("Zero floating ips available.")
 
 
+class FloatingIpAlreadyInUse(NovaException):
+    message = _("Floating ip %(address)s already in use by %(fixed_ip)s.")
+
+
 class NoFloatingIpsDefined(NotFound):
     message = _("Zero floating ips exist.")
 
diff --git a/nova/network/api.py b/nova/network/api.py
index d04474df3..78580d360 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -111,6 +111,12 @@ class API(base.Base):
                               '(%(project)s)') %
                               {'address': floating_ip['address'],
                               'project': context.project_id})
+
+        # If this address has been previously associated with a
+        # different fixed ip, disassociate the floating_ip
+        if floating_ip['fixed_ip'] and floating_ip['fixed_ip'] is not fixed_ip:
+            self.disassociate_floating_ip(context, floating_ip['address'])
+
         # NOTE(vish): if we are multi_host, send to the instances host
         if fixed_ip['network']['multi_host']:
             host = fixed_ip['instance']['host']
diff --git a/nova/network/manager.py b/nova/network/manager.py
index b4605eea5..e6b30d1a0 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -280,6 +280,13 @@ class FloatingIP(object):
 
     def associate_floating_ip(self, context, floating_address, fixed_address):
         """Associates a floating ip with a fixed ip."""
+        floating_ip = self.db.floating_ip_get_by_address(context,
+                                                         floating_address)
+        if floating_ip['fixed_ip']:
+            raise exception.FloatingIpAlreadyInUse(
+                            address=floating_ip['address'],
+                            fixed_ip=floating_ip['fixed_ip']['address'])
+
         self.db.floating_ip_fixed_ip_associate(context,
                                                floating_address,
                                                fixed_address)
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index 568faf867..c14c29bf3 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -23,6 +23,7 @@ from nova import db
 from nova import test
 from nova import network
 from nova.tests.api.openstack import fakes
+from nova.tests.api.openstack import test_servers
 from nova.api.openstack.contrib.floating_ips import FloatingIPController
@@ -60,10 +61,38 @@ def compute_api_associate(self, context, instance_id, floating_ip):
     pass
 
 
+def network_api_associate(self, context, floating_ip, fixed_ip):
+    pass
+
+
 def network_api_disassociate(self, context, floating_address):
     pass
 
 
+def network_get_instance_nw_info(self, context, instance):
+    info = {
+        'label': 'fake',
+        'gateway': 'fake',
+        'dhcp_server': 'fake',
+        'broadcast': 'fake',
+        'mac': 'fake',
+        'vif_uuid': 'fake',
+        'rxtx_cap': 'fake',
+        'dns': [],
+        'ips': [{'ip': '10.0.0.1'}],
+        'should_create_bridge': False,
+        'should_create_vlan': False}
+
+    return [['ignore', info]]
+
+
+def fake_instance_get(context, instance_id):
+    return {
+        "id": 1,
+        "user_id": 'fakeuser',
+        "project_id": '123'}
+
+
 class FloatingIpTest(test.TestCase):
     address = "10.10.10.10"
@@ -79,9 +108,6 @@ class FloatingIpTest(test.TestCase):
 
     def setUp(self):
         super(FloatingIpTest, self).setUp()
-        self.controller = FloatingIPController()
-        fakes.stub_out_networking(self.stubs)
-        fakes.stub_out_rate_limiting(self.stubs)
         self.stubs.Set(network.api.API, "get_floating_ip",
                        network_api_get_floating_ip)
         self.stubs.Set(network.api.API, "get_floating_ip_by_ip",
@@ -92,10 +118,13 @@ class FloatingIpTest(test.TestCase):
                        network_api_allocate)
         self.stubs.Set(network.api.API, "release_floating_ip",
                        network_api_release)
-        self.stubs.Set(compute.api.API, "associate_floating_ip",
-                       compute_api_associate)
         self.stubs.Set(network.api.API, "disassociate_floating_ip",
                        network_api_disassociate)
+        self.stubs.Set(network.api.API, "get_instance_nw_info",
+                       network_get_instance_nw_info)
+        self.stubs.Set(db.api, 'instance_get',
+                       fake_instance_get)
+
         self.context = context.get_admin_context()
         self._create_floating_ip()
@@ -165,6 +194,8 @@ class FloatingIpTest(test.TestCase):
         self.assertEqual(res.status_int, 202)
 
     def test_add_floating_ip_to_instance(self):
+        self.stubs.Set(network.api.API, "associate_floating_ip",
+                       network_api_associate)
         body = dict(addFloatingIp=dict(address='11.0.0.1'))
         req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
         req.method = "POST"
@@ -174,6 +205,33 @@ class FloatingIpTest(test.TestCase):
         resp = req.get_response(fakes.wsgi_app())
         self.assertEqual(resp.status_int, 202)
 
+    def test_add_associated_floating_ip_to_instance(self):
+        def fake_fixed_ip_get_by_address(ctx, address, session=None):
+            return {'address': address, 'network': {'multi_host': None,
+                                                    'host': 'fake'}}
+
+        self.disassociated = False
+
+        def fake_network_api_disassociate(local_self, ctx, floating_address):
+            self.disassociated = True
+
+        db.floating_ip_update(self.context, self.address, {'project_id': '123',
+                                                           'fixed_ip_id': 1})
+        self.stubs.Set(network.api.API, "disassociate_floating_ip",
+                       fake_network_api_disassociate)
+        self.stubs.Set(db.api, "fixed_ip_get_by_address",
+                       fake_fixed_ip_get_by_address)
+
+        body = dict(addFloatingIp=dict(address=self.address))
+        req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
+        req.method = "POST"
+        req.body = json.dumps(body)
+        req.headers["content-type"] = "application/json"
+
+        resp = req.get_response(fakes.wsgi_app())
+        self.assertEqual(resp.status_int, 202)
+        self.assertTrue(self.disassociated)
+
     def test_remove_floating_ip_from_instance(self):
         body = dict(removeFloatingIp=dict(address='11.0.0.1'))
         req = webob.Request.blank('/v1.1/123/servers/test_inst/action')
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 0b8539442..25ff940f0 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -371,6 +371,22 @@ class VlanNetworkTestCase(test.TestCase):
         self.mox.ReplayAll()
         self.network.validate_networks(self.context, requested_networks)
 
+    def test_cant_associate_associated_floating_ip(self):
+        ctxt = context.RequestContext('testuser', 'testproject',
+                                      is_admin=False)
+
+        def fake_floating_ip_get_by_address(context, address):
+            return {'address': '10.10.10.10',
+                    'fixed_ip': {'address': '10.0.0.1'}}
+        self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
+                       fake_floating_ip_get_by_address)
+
+        self.assertRaises(exception.FloatingIpAlreadyInUse,
+                          self.network.associate_floating_ip,
+                          ctxt,
+                          mox.IgnoreArg(),
+                          mox.IgnoreArg())
+
 
 class CommonNetworkTestCase(test.TestCase):
-- cgit

From fdbb12e1e4b0b2cc28344510afb1c57620240901 Mon Sep 17 00:00:00 2001
From: Brian Lamar
Date: Tue, 30 Aug 2011 10:42:51 -0400
Subject: Fix a bad merge on my part, this fixes rebuilds!
--- nova/compute/manager.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 167be66db..0477db745 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -547,9 +547,6 @@ class ComputeManager(manager.SchedulerDependentManager): vm_state=vm_states.REBUILDING, task_state=task_states.BLOCK_DEVICE_MAPPING) - image_ref = kwargs.get('image_ref') - instance_ref.image_ref = image_ref - instance_ref.injected_files = kwargs.get('injected_files', []) network_info = self.network_api.get_instance_nw_info(context, instance_ref) @@ -572,11 +569,9 @@ class ComputeManager(manager.SchedulerDependentManager): power_state=current_power_state, vm_state=vm_states.ACTIVE, task_state=None, - image_ref=image_ref, launched_at=utils.utcnow()) - usage_info = utils.usage_from_instance(instance_ref, - image_ref=image_ref) + usage_info = utils.usage_from_instance(instance_ref) notifier.notify('compute.%s' % self.host, 'compute.instance.rebuild', notifier.INFO, -- cgit From 1155b734164eb5856d68c926f7bf64a37ae4a3a4 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 30 Aug 2011 11:13:25 -0400 Subject: supporting changes-since --- nova/api/openstack/servers.py | 24 +++++++++++++----------- nova/db/sqlalchemy/api.py | 12 ++++++++---- nova/tests/api/openstack/test_servers.py | 24 ++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 15 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 27c67e79e..e0e40679a 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -107,6 +107,14 @@ class Controller(object): LOG.error(reason) raise exception.InvalidInput(reason=reason) + if 'changes-since' in search_opts: + try: + parsed = utils.parse_isotime(search_opts['changes-since']) + except ValueError: + msg = _('Invalid changes-since value') + raise exc.HTTPBadRequest(explanation=msg) + search_opts['changes-since'] = parsed + # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. @@ -114,23 +122,17 @@ class Controller(object): # should return recently deleted images according to the API spec. if 'deleted' not in search_opts: - # Admin hasn't specified deleted filter if 'changes-since' not in search_opts: - # No 'changes-since', so we need to find non-deleted servers + # No 'changes-since', so we only want non-deleted servers search_opts['deleted'] = False - else: - # This is the default, but just in case.. - search_opts['deleted'] = True - - instance_list = self.compute_api.get_all( - context, search_opts=search_opts) - # FIXME(comstud): 'changes-since' is not fully implemented. Where - # should this be filtered? 
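+        # NOTE(editor): the FIXME above is resolved by this same change;
+        # the changes-since value is now applied in the DB layer, via the
+        # updated_at filter added to instance_get_all_by_filters below.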
+ instance_list = self.compute_api.get_all(context, + search_opts=search_opts) limited_list = self._limit_items(instance_list, req) servers = [self._build_view(req, inst, is_detail)['server'] - for inst in limited_list] + for inst in limited_list] + return dict(servers=servers) @scheduler_api.redirect_handler diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 65b09a65d..1685e9928 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -35,6 +35,7 @@ from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func +from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS @@ -1250,12 +1251,17 @@ def instance_get_all_by_filters(context, filters): options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ - filter_by(deleted=can_read_deleted(context)) + order_by(desc(models.Instance.updated_at)) # Make a copy of the filters dictionary to use going forward, as we'll # be modifying it and we shouldn't affect the caller's use of it. filters = filters.copy() + if 'changes-since' in filters: + changes_since = filters['changes-since'] + query_prefix = query_prefix.\ + filter(models.Instance.updated_at > changes_since) + if not context.is_admin: # If we're not admin context, add appropriate filter.. if context.project_id: @@ -1277,9 +1283,7 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.\ - filter_by(deleted=can_read_deleted(context)).\ - all() + instances = query_prefix.all() if not instances: return [] diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3559e6de5..9036d6552 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -1257,6 +1257,30 @@ class ServersTest(test.TestCase): self.assertEqual(len(servers), 1) self.assertEqual(servers[0]['id'], 100) + def test_get_servers_allows_changes_since_v1_1(self): + def fake_get_all(compute_self, context, search_opts=None): + self.assertNotEqual(search_opts, None) + self.assertTrue('changes-since' in search_opts) + changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1) + self.assertEqual(search_opts['changes-since'], changes_since) + return [stub_instance(100)] + + self.stubs.Set(nova.compute.API, 'get_all', fake_get_all) + + params = 'changes-since=2011-01-24T17:08:01Z' + req = webob.Request.blank('/v1.1/fake/servers?%s' % params) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + servers = json.loads(res.body)['servers'] + self.assertEqual(len(servers), 1) + self.assertEqual(servers[0]['id'], 100) + + def test_get_servers_allows_changes_since_bad_value_v1_1(self): + params = 'changes-since=asdf' + req = webob.Request.blank('/v1.1/fake/servers?%s' % params) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_get_servers_unknown_or_admin_options1(self): """Test getting servers by admin-only or unknown options. This tests when admin_api is off. 
Make sure the admin and
-- cgit

From a127747db0ab3405a768e8f680a2eb94ae8ce314 Mon Sep 17 00:00:00 2001
From: Brian Waldon
Date: Tue, 30 Aug 2011 11:44:19 -0400
Subject: adding an assert

---
 nova/tests/api/openstack/test_servers.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 9036d6552..5f1ca466a 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -1263,6 +1263,7 @@ class ServersTest(test.TestCase):
             self.assertTrue('changes-since' in search_opts)
             changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1)
             self.assertEqual(search_opts['changes-since'], changes_since)
+            self.assertTrue('deleted' not in search_opts)
             return [stub_instance(100)]
 
         self.stubs.Set(nova.compute.API, 'get_all', fake_get_all)
-- cgit

From 1c6d74a08dbb5b472e85e3d3a1fe2b3b8b9b89e3 Mon Sep 17 00:00:00 2001
From: Brian Waldon
Date: Tue, 30 Aug 2011 12:51:02 -0400
Subject: changing default sort to created_at

---
 nova/db/sqlalchemy/api.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 1685e9928..1e55d08e7 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1251,7 +1251,7 @@ def instance_get_all_by_filters(context, filters):
                    options(joinedload_all('fixed_ips.network')).\
                    options(joinedload('metadata')).\
                    options(joinedload('instance_type')).\
-                   order_by(desc(models.Instance.updated_at))
+                   order_by(desc(models.Instance.created_at))
-- cgit

From 980ae6aa2f3797e428beee6e383d8bd134175734 Mon Sep 17 00:00:00 2001
From: Brian Waldon
Date: Tue, 30 Aug 2011 14:48:02 -0400
Subject: yielding all the images

---
 nova/image/glance.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/nova/image/glance.py b/nova/image/glance.py
index 9060f6a91..f2f246ba6 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -153,7 +153,8 @@ class GlanceImageService(service.BaseImageService):
             except KeyError:
                 raise exception.ImagePaginationFailed()
 
-        self._fetch_images(fetch_func, **kwargs)
+        for image in self._fetch_images(fetch_func, **kwargs):
+            yield image
 
     def show(self, context, image_id):
         """Returns a dict with image data for the given opaque image id."""
-- cgit

From 09fd29a9cc29904679cc8921adaf7559c23f347f Mon Sep 17 00:00:00 2001
From: Brian Waldon
Date: Tue, 30 Aug 2011 14:52:23 -0400
Subject: fixing short-circuit condition

---
 nova/image/glance.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/nova/image/glance.py b/nova/image/glance.py
index f2f246ba6..b5f52351f 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -141,12 +141,13 @@ class GlanceImageService(service.BaseImageService):
         """Paginate through results from glance server"""
         images = fetch_func(**kwargs)
 
-        for image in images:
-            yield image
-        else:
+        if not images:
             # break out of recursive loop to end pagination
             return
 
+        for image in images:
+            yield image
+
         try:
             # attempt to advance the marker in order to fetch next page
             kwargs['marker'] = images[-1]['id']
-- cgit

From dcf5970dd9bed27201c593d7d053970a632e5eee Mon Sep 17 00:00:00 2001
From: Vishvananda Ishaya
Date: Tue, 30 Aug 2011 12:01:18 -0700
Subject: make two functions instead of fast flag and add compute api commands
 instead of hitting db directly

---
 bin/instance-usage-audit | 5 ++-
nova/api/openstack/contrib/simple_tenant_usage.py | 14 ++++----- nova/compute/api.py | 17 +++++++--- nova/db/api.py | 16 +++++++--- nova/db/sqlalchemy/api.py | 36 +++++++++++++++------- .../openstack/contrib/test_simple_tenant_usage.py | 10 +++--- 6 files changed, 62 insertions(+), 36 deletions(-) diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit index a06c6b1b3..7ce5732e7 100755 --- a/bin/instance-usage-audit +++ b/bin/instance-usage-audit @@ -102,9 +102,8 @@ if __name__ == '__main__': logging.setup() begin, end = time_period(FLAGS.instance_usage_audit_period) print "Creating usages for %s until %s" % (str(begin), str(end)) - instances = db.instance_get_active_by_window(context.get_admin_context(), - begin, - end) + ctxt = context.get_admin_context() + instances = db.instance_get_active_by_window_joined(ctxt, begin, end) print "%s instances" % len(instances) for instance_ref in instances: usage_info = utils.usage_from_instance(instance_ref, diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 16e712815..363ac1451 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -19,10 +19,9 @@ import urlparse import webob from datetime import datetime -from nova import db from nova import exception from nova import flags -from nova.compute import instance_types +from nova.compute import api from nova.api.openstack import extensions from nova.api.openstack import views from nova.db.sqlalchemy.session import get_session @@ -71,11 +70,11 @@ class SimpleTenantUsageController(object): def _tenant_usages_for_period(self, context, period_start, period_stop, tenant_id=None, detailed=True): - instances = db.instance_get_active_by_window(context, + compute_api = api.API() + instances = compute_api.get_active_by_window(context, period_start, period_stop, - tenant_id, - fast=True) + tenant_id) from nova import log as logging logging.info(instances) rval = {} @@ -90,8 +89,9 @@ class SimpleTenantUsageController(object): if not flavors.get(flavor_type): try: - flavors[flavor_type] = db.instance_type_get(context, - flavor_type) + it_ref = compute_api.get_instance_type(context, + flavor_type) + flavors[flavor_type] = it_ref except exception.InstanceTypeNotFound: # can't bill if there is no instance type continue diff --git a/nova/compute/api.py b/nova/compute/api.py index 3b4bde8ea..53bab53a4 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -19,13 +19,11 @@ """Handles all requests relating to instances (guest vms).""" -import eventlet import novaclient import re import time from nova import block_device -from nova import db from nova import exception from nova import flags import nova.image @@ -237,7 +235,7 @@ class API(base.Base): self.ensure_default_security_group(context) if key_data is None and key_name: - key_pair = db.key_pair_get(context, context.user_id, key_name) + key_pair = self.db.key_pair_get(context, context.user_id, key_name) key_data = key_pair['public_key'] if reservation_id is None: @@ -802,6 +800,15 @@ class API(base.Base): "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id}}) + def get_active_by_window(self, context, begin, end=None, project_id=None): + """Get instances that were continuously active over a window.""" + return self.db.instance_get_active_by_window(context, begin, end, + project_id) + + def get_instance_type(self, context, instance_type_id): + """Get an instance type by instance type id.""" + return 
self.db.instance_type_get(context, instance_type_id) + def get(self, context, instance_id): """Get a single instance with the given instance_id.""" # NOTE(sirp): id used to be exclusively integer IDs; now we're @@ -1001,7 +1008,7 @@ class API(base.Base): :param extra_properties: dict of extra image properties to include """ - instance = db.api.instance_get(context, instance_id) + instance = self.db.api.instance_get(context, instance_id) properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), 'image_state': 'creating', @@ -1026,7 +1033,7 @@ class API(base.Base): def rebuild(self, context, instance_id, image_href, admin_password, name=None, metadata=None, files_to_inject=None): """Rebuild the given instance with the provided metadata.""" - instance = db.api.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) if instance["state"] == power_state.BUILDING: msg = _("Instance already building") diff --git a/nova/db/api.py b/nova/db/api.py index 3233985b6..148887635 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -496,14 +496,20 @@ def instance_get_all_by_filters(context, filters): return IMPL.instance_get_all_by_filters(context, filters) -def instance_get_active_by_window(context, begin, end=None, - project_id=None, fast=False): +def instance_get_active_by_window(context, begin, end=None, project_id=None): """Get instances active during a certain time window. - Setting fast to true will stop all joinedloads. Specifying a project_id will filter for a certain project.""" - return IMPL.instance_get_active_by_window(context, begin, end, - project_id, fast) + return IMPL.instance_get_active_by_window(context, begin, end, project_id) + + +def instance_get_active_by_window_joined(context, begin, end=None, + project_id=None): + """Get instances and joins active during a certain time window. 
+ + Specifying a project_id will filter for a certain project.""" + return IMPL.instance_get_active_by_window_joined(context, begin, end, + project_id) def instance_get_all_by_user(context, user_id): diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index c4cc199eb..d76dc22ed 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1306,19 +1306,33 @@ def instance_get_all_by_filters(context, filters): return instances -@require_admin_context -def instance_get_active_by_window(context, begin, end=None, - project_id=None, fast=False): - """Return instances that were continuously active over the given window""" +@require_context +def instance_get_active_by_window(context, begin, end=None, project_id=None): + """Return instances that were continuously active over window.""" session = get_session() - query = session.query(models.Instance) - if not fast: - query = query.options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ - options(joinedload('instance_type')) + query = session.query(models.Instance).\ + filter(models.Instance.launched_at < begin) + if end: + query = query.filter(or_(models.Instance.terminated_at == None, + models.Instance.terminated_at > end)) + else: + query = query.filter(models.Instance.terminated_at == None) + if project_id: + query = query.filter_by(project_id=project_id) + return query.all() + - query = query.filter(models.Instance.launched_at < begin) +@require_admin_context +def instance_get_active_by_window_joined(context, begin, end=None, + project_id=None): + """Return instances and joins that were continuously active over window.""" + session = get_session() + query = session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('instance_type')).\ + filter(models.Instance.launched_at < begin) if end: query = query.filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > end)) diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py index 2bd619820..de0a6d779 100644 --- a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -20,9 +20,9 @@ import json import webob from nova import context -from nova import db from nova import flags from nova import test +from nova.compute import api from nova.tests.api.openstack import fakes @@ -39,7 +39,7 @@ STOP = datetime.datetime.utcnow() START = STOP - datetime.timedelta(hours=HOURS) -def fake_instance_type_get(context, instance_type_id): +def fake_instance_type_get(self, context, instance_type_id): return {'id': 1, 'vcpus': VCPUS, 'local_gb': LOCAL_GB, @@ -59,7 +59,7 @@ def get_fake_db_instance(start, end, instance_id, tenant_id): 'launched_at': start, 'terminated_at': end} -def fake_instance_get_active_by_window(context, begin, end, project_id, fast): +def fake_instance_get_active_by_window(self, context, begin, end, project_id): return [get_fake_db_instance(START, STOP, x, @@ -70,9 +70,9 @@ def fake_instance_get_active_by_window(context, begin, end, project_id, fast): class SimpleTenantUsageTest(test.TestCase): def setUp(self): super(SimpleTenantUsageTest, self).setUp() - self.stubs.Set(db, "instance_type_get", + self.stubs.Set(api.API, "get_instance_type", fake_instance_type_get) - 
self.stubs.Set(db, "instance_get_active_by_window", + self.stubs.Set(api.API, "get_active_by_window", fake_instance_get_active_by_window) self.admin_context = context.RequestContext('fakeadmin_0', 'faketenant_0', -- cgit From 85e182e72d8f15678234701f6b254bf6c8e17f3a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:08:53 -0700 Subject: fix a bunch of direct usages of db in compute api --- nova/compute/api.py | 52 +++++++++++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 25 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 53bab53a4..f1099576e 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -387,9 +387,9 @@ class API(base.Base): security_groups = [] for security_group_name in security_group: - group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) security_groups.append(group['id']) for security_group_id in security_groups: @@ -549,8 +549,9 @@ class API(base.Base): def has_finished_migration(self, context, instance_uuid): """Returns true if an instance has a finished migration.""" try: - db.migration_get_by_instance_and_status(context, instance_uuid, - 'finished') + self.db.migration_get_by_instance_and_status(context, + instance_uuid, + 'finished') return True except exception.NotFound: return False @@ -564,14 +565,15 @@ class API(base.Base): :param context: the security context """ try: - db.security_group_get_by_name(context, context.project_id, - 'default') + self.db.security_group_get_by_name(context, + context.project_id, + 'default') except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} - db.security_group_create(context, values) + self.db.security_group_create(context, values) def trigger_security_group_rules_refresh(self, context, security_group_id): """Called when a rule is added to or removed from a security_group.""" @@ -636,7 +638,7 @@ class API(base.Base): """Called when a rule is added to or removed from a security_group""" hosts = [x['host'] for (x, idx) - in db.service_get_all_compute_sorted(context)] + in self.db.service_get_all_compute_sorted(context)] for host in hosts: rpc.cast(context, self.db.queue_get_for(context, FLAGS.compute_topic, host), @@ -664,11 +666,11 @@ class API(base.Base): def add_security_group(self, context, instance_id, security_group_name): """Add security group to the instance""" - security_group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + security_group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) # check if the server exists - inst = db.instance_get(context, instance_id) + inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server if self._is_security_group_associated_with_server(security_group, instance_id): @@ -680,21 +682,21 @@ class API(base.Base): if inst['state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_id) - db.instance_add_security_group(context.elevated(), - instance_id, - security_group['id']) + self.db.instance_add_security_group(context.elevated(), + instance_id, + security_group['id']) rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), {"method": 
"refresh_security_group_rules", "args": {"security_group_id": security_group['id']}}) def remove_security_group(self, context, instance_id, security_group_name): """Remove the security group associated with the instance""" - security_group = db.security_group_get_by_name(context, - context.project_id, - security_group_name) + security_group = self.db.security_group_get_by_name(context, + context.project_id, + security_group_name) # check if the server exists - inst = db.instance_get(context, instance_id) + inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server if not self._is_security_group_associated_with_server(security_group, instance_id): @@ -706,11 +708,11 @@ class API(base.Base): if inst['state'] != power_state.RUNNING: raise exception.InstanceNotRunning(instance_id=instance_id) - db.instance_remove_security_group(context.elevated(), - instance_id, - security_group['id']) + self.db.instance_remove_security_group(context.elevated(), + instance_id, + security_group['id']) rpc.cast(context, - db.queue_get_for(context, FLAGS.compute_topic, inst['host']), + self.db.queue_get_for(context, FLAGS.compute_topic, inst['host']), {"method": "refresh_security_group_rules", "args": {"security_group_id": security_group['id']}}) -- cgit From 2fcc6da8ba528c5169f7394d57f90ccd2754a23c Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:14:25 -0700 Subject: pep8, fix fakes --- nova/api/openstack/contrib/simple_tenant_usage.py | 1 + nova/compute/api.py | 12 ++++++------ nova/tests/api/openstack/contrib/test_createserverext.py | 2 ++ nova/tests/api/openstack/contrib/test_simple_tenant_usage.py | 2 +- nova/tests/api/openstack/test_servers.py | 1 + 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 363ac1451..e81aef66e 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -30,6 +30,7 @@ from webob import exc FLAGS = flags.FLAGS + class SimpleTenantUsageController(object): def _hours_for(self, instance, period_start, period_stop): launched_at = instance['launched_at'] diff --git a/nova/compute/api.py b/nova/compute/api.py index f1099576e..0074028e2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -388,8 +388,8 @@ class API(base.Base): security_groups = [] for security_group_name in security_group: group = self.db.security_group_get_by_name(context, - context.project_id, - security_group_name) + context.project_id, + security_group_name) security_groups.append(group['id']) for security_group_id in security_groups: @@ -667,8 +667,8 @@ class API(base.Base): def add_security_group(self, context, instance_id, security_group_name): """Add security group to the instance""" security_group = self.db.security_group_get_by_name(context, - context.project_id, - security_group_name) + context.project_id, + security_group_name) # check if the server exists inst = self.db.instance_get(context, instance_id) #check if the security group is associated with the server @@ -693,8 +693,8 @@ class API(base.Base): def remove_security_group(self, context, instance_id, security_group_name): """Remove the security group associated with the instance""" security_group = self.db.security_group_get_by_name(context, - context.project_id, - security_group_name) + context.project_id, + security_group_name) # check if the server exists inst = 
self.db.instance_get(context, instance_id) #check if the security group is associated with the server diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..f6d9ba784 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -23,6 +23,7 @@ from xml.dom import minidom import stubout import webob +from nova import db from nova import exception from nova import flags from nova import test @@ -76,6 +77,7 @@ class CreateserverextTest(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.db = db def create(self, *args, **kwargs): if 'injected_files' in kwargs: diff --git a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py index de0a6d779..2430b9d51 100644 --- a/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/contrib/test_simple_tenant_usage.py @@ -26,7 +26,6 @@ from nova.compute import api from nova.tests.api.openstack import fakes - FLAGS = flags.FLAGS SERVERS = 5 @@ -59,6 +58,7 @@ def get_fake_db_instance(start, end, instance_id, tenant_id): 'launched_at': start, 'terminated_at': end} + def fake_instance_get_active_by_window(self, context, begin, end, project_id): return [get_fake_db_instance(START, STOP, diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 3559e6de5..d065f48b6 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3229,6 +3229,7 @@ class TestServerInstanceCreation(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.db = db def create(self, *args, **kwargs): if 'injected_files' in kwargs: -- cgit From 5cf27b5a338f7821f82c91df5889159b56fa0bb6 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Tue, 30 Aug 2011 12:41:30 -0700 Subject: fix remaining tests --- nova/api/openstack/contrib/simple_tenant_usage.py | 2 +- nova/compute/api.py | 2 +- .../api/openstack/contrib/test_security_groups.py | 72 +++++++++++----------- nova/tests/api/openstack/test_extensions.py | 1 + nova/tests/api/openstack/test_server_actions.py | 2 +- 5 files changed, 40 insertions(+), 39 deletions(-) diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index e81aef66e..69b38e229 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -212,7 +212,7 @@ class SimpleTenantUsageController(object): class Simple_tenant_usage(extensions.ExtensionDescriptor): def get_name(self): - return "Simple_tenant_usage" + return "SimpleTenantUsage" def get_alias(self): return "os-simple-tenant-usage" diff --git a/nova/compute/api.py b/nova/compute/api.py index 0074028e2..205207d66 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -1010,7 +1010,7 @@ class API(base.Base): :param extra_properties: dict of extra image properties to include """ - instance = self.db.api.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), 'image_state': 'creating', diff --git a/nova/tests/api/openstack/contrib/test_security_groups.py b/nova/tests/api/openstack/contrib/test_security_groups.py index bc1536911..0816a6312 100644 --- 
a/nova/tests/api/openstack/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/contrib/test_security_groups.py @@ -360,7 +360,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_by_invalid_server_id(self): body = dict(addSecurityGroup=dict(name='test')) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/invalid/action') req.headers['Content-Type'] = 'application/json' @@ -372,7 +372,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_without_body(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=None) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -382,7 +382,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_no_security_group_name(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=dict()) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -392,7 +392,7 @@ class TestSecurityGroups(test.TestCase): def test_associate_security_group_name_with_whitespaces(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(addSecurityGroup=dict(name=" ")) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -400,9 +400,9 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_associate_non_existing_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) body = dict(addSecurityGroup=dict(name="test")) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/10000/action') req.headers['Content-Type'] = 'application/json' @@ -412,8 +412,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 404) def test_associate_non_running_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_non_running_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) body = dict(addSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -424,8 +424,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_associate_already_associated_security_group_to_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) body = dict(addSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -436,12 +436,12 @@ class TestSecurityGroups(test.TestCase): 
self.assertEquals(response.status_int, 400) def test_associate(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_add_security_group') - nova.db.instance_add_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group') + nova.db.api.instance_add_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) self.mox.ReplayAll() @@ -454,12 +454,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 202) def test_associate_xml(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_add_security_group') - nova.db.instance_add_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_add_security_group') + nova.db.api.instance_add_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) self.mox.ReplayAll() @@ -483,7 +483,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_by_invalid_server_id(self): body = dict(removeSecurityGroup=dict(name='test')) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) req = webob.Request.blank('/v1.1/123/servers/invalid/action') req.headers['Content-Type'] = 'application/json' @@ -495,7 +495,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_without_body(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=None) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -505,7 +505,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_no_security_group_name(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=dict()) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -515,7 +515,7 @@ class TestSecurityGroups(test.TestCase): def test_disassociate_security_group_name_with_whitespaces(self): req = webob.Request.blank('/v1.1/123/servers/1/action') body = dict(removeSecurityGroup=dict(name=" ")) - self.stubs.Set(nova.db, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server) req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body) @@ -523,9 +523,9 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate_non_existing_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server_nonexistant) + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) body = dict(removeSecurityGroup=dict(name="test")) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) 
req = webob.Request.blank('/v1.1/123/servers/10000/action') req.headers['Content-Type'] = 'application/json' @@ -535,8 +535,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 404) def test_disassociate_non_running_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_non_running_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_non_running_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) body = dict(removeSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -547,8 +547,8 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate_already_associated_security_group_to_instance(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group_without_instances) body = dict(removeSecurityGroup=dict(name="test")) req = webob.Request.blank('/v1.1/123/servers/1/action') @@ -559,12 +559,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 400) def test_disassociate(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group') - nova.db.instance_remove_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group') + nova.db.api.instance_remove_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) self.mox.ReplayAll() @@ -577,12 +577,12 @@ class TestSecurityGroups(test.TestCase): self.assertEquals(response.status_int, 202) def test_disassociate_xml(self): - self.stubs.Set(nova.db, 'instance_get', return_server) - self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group') - nova.db.instance_remove_security_group(mox.IgnoreArg(), + self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.mox.StubOutWithMock(nova.db.api, 'instance_remove_security_group') + nova.db.api.instance_remove_security_group(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) - self.stubs.Set(nova.db, 'security_group_get_by_name', + self.stubs.Set(nova.db.api, 'security_group_get_by_name', return_security_group) self.mox.ReplayAll() diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 05267d8fb..31443242b 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -95,6 +95,7 @@ class ExtensionControllerTest(test.TestCase): "Quotas", "Rescue", "SecurityGroups", + "SimpleTenantUsage", "VSAs", "VirtualInterfaces", "Volumes", diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 3dfdeb79c..c9c33abbd 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -248,7 +248,7 @@ class ServerActionsTest(test.TestCase): def fake_migration_get(*args): return {} - self.stubs.Set(nova.db, 'migration_get_by_instance_and_status', + self.stubs.Set(nova.db.api, 'migration_get_by_instance_and_status', fake_migration_get) 
res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) -- cgit From 4f65e0153c22886b118bdb92402b91d9b209632c Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 13:53:01 -0700 Subject: restore old way FLAGS.rpc_backend worked.. no short name support for consistency --- nova/rpc/__init__.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index fe50fb476..4f57a345d 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,18 +23,10 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'kombu', + 'nova.rpc.impl_kombu', "The messaging module to use, defaults to kombu.") -impl_table = {'kombu': 'nova.rpc.impl_kombu', - 'amqp': 'nova.rpc.impl_kombu', - 'carrot': 'nova.rpc.impl_carrot'} - - -# rpc_backend can be a short name like 'kombu', or it can be the full -# module name -RPCIMPL = import_object(impl_table.get(FLAGS.rpc_backend, - FLAGS.rpc_backend)) +RPCIMPL = import_object(FLAGS.rpc_backend) def create_connection(new=True): -- cgit From b6c306b1a207fd2c5ee2e53d841fd8e60c2fd8e1 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 13:55:06 -0700 Subject: add kombu to pip-requires and contrib/nova.sh --- contrib/nova.sh | 2 +- tools/pip-requires | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/nova.sh b/contrib/nova.sh index 7994e5133..16cddebd5 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -81,7 +81,7 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y python-netaddr python-pastedeploy python-eventlet sudo apt-get install -y python-novaclient python-glance python-cheetah sudo apt-get install -y python-carrot python-tempita python-sqlalchemy - sudo apt-get install -y python-suds + sudo apt-get install -y python-suds python-kombu if [ "$USE_IPV6" == 1 ]; then diff --git a/tools/pip-requires b/tools/pip-requires index 60b502ffd..66d6a48d9 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -8,6 +8,7 @@ anyjson==0.2.4 boto==1.9b carrot==0.10.5 eventlet +kombu lockfile==0.8 lxml==2.3 python-novaclient==2.6.0 -- cgit From e326acf46748904704dd97f511927559dc2480f2 Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Tue, 30 Aug 2011 15:05:39 -0700 Subject: Fix for LP Bug #837534 --- nova/api/openstack/create_instance_helper.py | 13 ++++++ .../api/openstack/contrib/test_createserverext.py | 46 +++++++++++++++++++++- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index 483ff4985..019283fdf 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -138,7 +138,10 @@ class CreateInstanceHelper(object): raise exc.HTTPBadRequest(explanation=msg) zone_blob = server_dict.get('blob') + user_data = server_dict.get('user_data') + self._validate_user_data(user_data) + availability_zone = server_dict.get('availability_zone') name = server_dict['name'] self._validate_server_name(name) @@ -370,6 +373,16 @@ class CreateInstanceHelper(object): return networks + def _validate_user_data(self, user_data): + """Check if the user_data is encoded properly""" + if not user_data: + return + try: + user_data = base64.b64decode(user_data) + except TypeError: + expl = _('Userdata content cannot be decoded') + raise exc.HTTPBadRequest(explanation=expl) + class ServerXMLDeserializer(wsgi.XMLDeserializer): """ diff --git 
a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index e5eed14fe..65c7c1682 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -37,7 +37,8 @@ import nova.image.fake import nova.rpc from nova.tests.api.openstack import fakes - +from nova import log as logging +LOG = logging.getLogger('nova.api.openstack.test') FLAGS = flags.FLAGS FLAGS.verbose = True @@ -76,6 +77,7 @@ class CreateserverextTest(test.TestCase): def __init__(self): self.injected_files = None self.networks = None + self.user_data = None def create(self, *args, **kwargs): if 'injected_files' in kwargs: @@ -87,6 +89,10 @@ class CreateserverextTest(test.TestCase): self.networks = kwargs['requested_networks'] else: self.networks = None + + if 'user_data' in kwargs: + self.user_data = kwargs['user_data'] + return [{'id': '1234', 'display_name': 'fakeinstance', 'uuid': FAKE_UUID, 'created_at': "", @@ -119,6 +125,14 @@ class CreateserverextTest(test.TestCase): server['networks'] = network_list return {'server': server} + def _create_user_data_request_dict(self, user_data): + server = {} + server['name'] = 'new-server-test' + server['imageRef'] = 1 + server['flavorRef'] = 1 + server['user_data'] = user_data + return {'server': server} + def _get_create_request_json(self, body_dict): req = webob.Request.blank('/v1.1/123/os-create-server-ext') req.headers['Content-Type'] = 'application/json' @@ -178,6 +192,13 @@ class CreateserverextTest(test.TestCase): self._run_create_instance_with_mock_compute_api(request) return request, response, compute_api.networks + def _create_instance_with_user_data_json(self, networks): + body_dict = self._create_user_data_request_dict(networks) + request = self._get_create_request_json(body_dict) + compute_api, response = \ + self._run_create_instance_with_mock_compute_api(request) + return request, response, compute_api.user_data + def _create_instance_with_networks_xml(self, networks): body_dict = self._create_networks_request_dict(networks) request = self._get_create_request_xml(body_dict) @@ -188,6 +209,7 @@ class CreateserverextTest(test.TestCase): def test_create_instance_with_no_networks(self): request, response, networks = \ self._create_instance_with_networks_json(networks=None) + LOG.debug(response) self.assertEquals(response.status_int, 202) self.assertEquals(networks, None) @@ -304,3 +326,25 @@ class CreateserverextTest(test.TestCase): self.assertEquals(response.status_int, 202) self.assertEquals(compute_api.networks, [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]) + + def test_create_instance_with_userdata(self): + user_data_contents = '#!/bin/bash\necho "Oh no!"\n' + user_data_contents = base64.b64encode(user_data_contents) + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + self.assertEquals(response.status_int, 202) + self.assertEquals(user_data, user_data_contents) + + def test_create_instance_with_userdata_none(self): + user_data_contents = None + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + self.assertEquals(response.status_int, 202) + self.assertEquals(user_data, user_data_contents) + + def test_create_instance_with_userdata_with_non_b64_content(self): + user_data_contents = '#!/bin/bash\necho "Oh no!"\n' + request, response, user_data = \ + self._create_instance_with_user_data_json(user_data_contents) + 
self.assertEquals(response.status_int, 400) + self.assertEquals(user_data, None) -- cgit From 476101d81cf81e6035b44e2257c1bcd8e958043a Mon Sep 17 00:00:00 2001 From: Tushar Patil Date: Tue, 30 Aug 2011 15:09:08 -0700 Subject: Removed debug messages --- nova/tests/api/openstack/contrib/test_createserverext.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/tests/api/openstack/contrib/test_createserverext.py b/nova/tests/api/openstack/contrib/test_createserverext.py index 65c7c1682..d8a5c9e55 100644 --- a/nova/tests/api/openstack/contrib/test_createserverext.py +++ b/nova/tests/api/openstack/contrib/test_createserverext.py @@ -37,8 +37,7 @@ import nova.image.fake import nova.rpc from nova.tests.api.openstack import fakes -from nova import log as logging -LOG = logging.getLogger('nova.api.openstack.test') + FLAGS = flags.FLAGS FLAGS.verbose = True @@ -209,7 +208,6 @@ class CreateserverextTest(test.TestCase): def test_create_instance_with_no_networks(self): request, response, networks = \ self._create_instance_with_networks_json(networks=None) - LOG.debug(response) self.assertEquals(response.status_int, 202) self.assertEquals(networks, None) -- cgit From 4ec4ddd2e6465f0483ecf50d430458169ad4c348 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:12:43 -0700 Subject: make default carrot again and delay the import in rpc/__init__.py --- nova/rpc/__init__.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 4f57a345d..32509fff6 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,27 +23,33 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.impl_kombu', - "The messaging module to use, defaults to kombu.") + 'nova.rpc.impl_carrot', + "The messaging module to use, defaults to carrot.") -RPCIMPL = import_object(FLAGS.rpc_backend) +_RPCIMPL = None + +def get_impl(): + global _RPCIMPL + if _RPCIMPL is None: + _RPCIMPL = import_object(FLAGS.rpc_backend) + return _RPCIMPL def create_connection(new=True): - return RPCIMPL.create_connection(new=new) + return get_impl().create_connection(new=new) def call(context, topic, msg): - return RPCIMPL.call(context, topic, msg) + return get_impl().call(context, topic, msg) def cast(context, topic, msg): - return RPCIMPL.cast(context, topic, msg) + return get_impl().cast(context, topic, msg) def fanout_cast(context, topic, msg): - return RPCIMPL.fanout_cast(context, topic, msg) + return get_impl().fanout_cast(context, topic, msg) def multicall(context, topic, msg): - return RPCIMPL.multicall(context, topic, msg) + return get_impl().multicall(context, topic, msg) -- cgit From 5ccbce699880557f9c58d4d403487979d3604ccf Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:30:16 -0700 Subject: pep8 fix --- nova/rpc/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index 32509fff6..e29cd80e1 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -28,7 +28,9 @@ flags.DEFINE_string('rpc_backend', _RPCIMPL = None + def get_impl(): + """Delay import of rpc_backend until FLAGS are loaded.""" global _RPCIMPL if _RPCIMPL is None: _RPCIMPL = import_object(FLAGS.rpc_backend) -- cgit From 2e12e975ee9d4ab7a17eebb0e36714b56d6b1779 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 15:52:25 -0700 Subject: logging change when rpc pool creates new connection --- nova/rpc/impl_carrot.py | 2 +- 
nova/rpc/impl_kombu.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py index 1d23c1853..303a4ff88 100644 --- a/nova/rpc/impl_carrot.py +++ b/nova/rpc/impl_carrot.py @@ -152,7 +152,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug('Creating new connection') + LOG.debug('Pool creating new connection') return Connection.instance(new=True) # Create a ConnectionPool to use for RPC calls. We'll order the diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index ffd6447da..8242bd177 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -491,7 +491,7 @@ class Pool(pools.Pool): # TODO(comstud): Timeout connections not used in a while def create(self): - LOG.debug('Creating new connection') + LOG.debug('Pool creating new connection') return Connection() # Create a ConnectionPool to use for RPC calls. We'll order the -- cgit From 66aa9a6306cde5db2039daaf11a8422619560a33 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 30 Aug 2011 20:45:51 -0700 Subject: fix FloatingIpAlreadyInUse to use correct string pattern, convert ApiErrors to 400 responses --- nova/api/openstack/contrib/floating_ips.py | 6 +++- nova/exception.py | 2 +- .../api/openstack/contrib/test_floating_ips.py | 32 ++++++++++++++++++++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 40086f778..ad3094d52 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -138,7 +138,11 @@ class Floating_ips(extensions.ExtensionDescriptor): msg = _("Address not specified") raise webob.exc.HTTPBadRequest(explanation=msg) - self.compute_api.associate_floating_ip(context, instance_id, address) + try: + self.compute_api.associate_floating_ip(context, instance_id, + address) + except exception.ApiError, e: + raise webob.exc.HTTPBadRequest(explanation=e.message) return webob.Response(status_int=202) diff --git a/nova/exception.py b/nova/exception.py index b54981963..caa65146d 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -534,7 +534,7 @@ class NoMoreFloatingIps(FloatingIpNotFound): class FloatingIpAlreadyInUse(NovaException): - message = _("Floating ip %(address) already in use by %(fixed_ip).") + message = _("Floating ip %(address)s already in use by %(fixed_ip)s.") class NoFloatingIpsDefined(NotFound): diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index c14c29bf3..08148278d 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -205,6 +205,38 @@ class FloatingIpTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 202) + def test_associate_floating_ip_to_instance_wrong_project_id(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + self.stubs.Set(db.api, "fixed_ip_get_by_address", + fake_fixed_ip_get_by_address) + db.floating_ip_update(self.context, self.address, {'project_id': 'bad', + 'fixed_ip_id': 1}) + body = dict(addFloatingIp=dict(address=self.address)) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + resp 
= req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + + def test_associate_floating_ip_to_instance_no_project_id(self): + def fake_fixed_ip_get_by_address(ctx, address, session=None): + return {'address': address, 'network': {'multi_host': None, + 'host': 'fake'}} + self.stubs.Set(db.api, "fixed_ip_get_by_address", + fake_fixed_ip_get_by_address) + db.floating_ip_update(self.context, self.address, {'project_id': None, + 'fixed_ip_id': 1}) + body = dict(addFloatingIp=dict(address=self.address)) + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 400) + def test_add_associated_floating_ip_to_instance(self): def fake_fixed_ip_get_by_address(ctx, address, session=None): return {'address': address, 'network': {'multi_host': None, -- cgit From b7c98734f8829fb4b213869bdfca6481fbeab98e Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 20:53:27 -0700 Subject: use kombu.connection.BrokerConnection vs kombu.connection.Connection so that older versions of kombu (1.0.4) work as well as newer. --- nova/rpc/impl_kombu.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index 8242bd177..ab70e7cfb 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ -322,7 +322,7 @@ class Connection(object): except self.connection.connection_errors: pass time.sleep(1) - self.connection = kombu.connection.Connection(**self.params) + self.connection = kombu.connection.BrokerConnection(**self.params) if FLAGS.fake_rabbit: # Kludge to speed up tests. self.connection.transport.polling_interval = 0.0 -- cgit From e43ffb5137ba256a21b3241b549d7c66cb7e5e04 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Tue, 30 Aug 2011 21:05:43 -0700 Subject: switched default to kombu per vishy --- nova/rpc/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/rpc/__init__.py b/nova/rpc/__init__.py index e29cd80e1..c0cfdd5ce 100644 --- a/nova/rpc/__init__.py +++ b/nova/rpc/__init__.py @@ -23,8 +23,8 @@ from nova import flags FLAGS = flags.FLAGS flags.DEFINE_string('rpc_backend', - 'nova.rpc.impl_carrot', - "The messaging module to use, defaults to carrot.") + 'nova.rpc.impl_kombu', + "The messaging module to use, defaults to kombu.") _RPCIMPL = None -- cgit From 1477b8c33374db1166c6c67ff68e03c94f3436a5 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Tue, 30 Aug 2011 21:16:26 -0700 Subject: add test to verify 400 response when out of addresses --- nova/api/openstack/contrib/floating_ips.py | 2 +- nova/tests/api/openstack/contrib/test_floating_ips.py | 18 +++++++++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index ad3094d52..99c0d1469 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -96,7 +96,7 @@ class FloatingIPController(object): except rpc.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? 
if ex.exc_type == 'NoMoreFloatingIps': - raise exception.NoMoreFloatingIps() + raise webob.exc.HTTPBadRequest(explanation=ex.message) else: raise diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index 08148278d..fc10f2f6c 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -20,8 +20,9 @@ import webob from nova import compute from nova import context from nova import db -from nova import test from nova import network +from nova import rpc +from nova import test from nova.tests.api.openstack import fakes from nova.tests.api.openstack import test_servers @@ -114,8 +115,6 @@ class FloatingIpTest(test.TestCase): network_api_get_floating_ip) self.stubs.Set(network.api.API, "list_floating_ips", network_api_list_floating_ips) - self.stubs.Set(network.api.API, "allocate_floating_ip", - network_api_allocate) self.stubs.Set(network.api.API, "release_floating_ip", network_api_release) self.stubs.Set(network.api.API, "disassociate_floating_ip", @@ -172,7 +171,20 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') self.assertEqual(res_dict['floating_ip']['instance_id'], None) + def test_floating_ip_allocate_no_free_ips(self): + def fake_call(*args, **kwargs): + raise(rpc.RemoteError('NoMoreFloatingIps', '', '')) + + self.stubs.Set(rpc, "call", fake_call) + req = webob.Request.blank('/v1.1/123/os-floating-ips') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_floating_ip_allocate(self): + self.stubs.Set(network.api.API, "allocate_floating_ip", + network_api_allocate) req = webob.Request.blank('/v1.1/123/os-floating-ips') req.method = 'POST' req.headers['Content-Type'] = 'application/json' -- cgit From c9a6681f484f38778987fbbaa352d07bd8f747c3 Mon Sep 17 00:00:00 2001 From: Brian Lamar Date: Wed, 31 Aug 2011 10:14:07 -0400 Subject: Removed extraneous import and s/vm_state.STOP/vm_states.STOPPED/ --- nova/db/sqlalchemy/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 24e1772f6..631e53ceb 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -28,6 +28,7 @@ from nova import flags from nova import ipv6 from nova import utils from nova import log as logging +from nova.compute import vm_states from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import or_ @@ -1102,11 +1103,10 @@ def instance_destroy(context, instance_id): def instance_stop(context, instance_id): session = get_session() with session.begin(): - from nova.compute import power_state session.query(models.Instance).\ filter_by(id=instance_id).\ update({'host': None, - 'vm_state': vm_state.STOP, + 'vm_state': vm_states.STOPPED, 'task_state': None, 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ -- cgit From ba8163fed57bcd1948be4cfb021fb32391702cc5 Mon Sep 17 00:00:00 2001 From: Chris Behrens Date: Wed, 31 Aug 2011 11:54:19 -0700 Subject: kludge for kombu 1.1.3 memory transport bug --- nova/rpc/impl_kombu.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py index ab70e7cfb..b994a6a10 100644 --- a/nova/rpc/impl_kombu.py +++ b/nova/rpc/impl_kombu.py @@ 
-303,6 +303,7 @@ class Connection(object): self.interval_stepping = FLAGS.rabbit_retry_backoff # max retry-interval = 30 seconds self.interval_max = 30 + self.memory_transport = False self.params = dict(hostname=FLAGS.rabbit_host, port=FLAGS.rabbit_port, @@ -311,6 +312,9 @@ class Connection(object): virtual_host=FLAGS.rabbit_virtual_host) if FLAGS.fake_rabbit: self.params['transport'] = 'memory' + self.memory_transport = True + else: + self.memory_transport = False self.connection = None self.reconnect() @@ -323,7 +327,7 @@ class Connection(object): pass time.sleep(1) self.connection = kombu.connection.BrokerConnection(**self.params) - if FLAGS.fake_rabbit: + if self.memory_transport: # Kludge to speed up tests. self.connection.transport.polling_interval = 0.0 self.consumer_num = itertools.count(1) @@ -345,6 +349,9 @@ class Connection(object): LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d' % self.params)) self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') for consumer in self.consumers: consumer.reconnect(self.channel) if self.consumers: @@ -374,6 +381,9 @@ class Connection(object): self.cancel_consumer_thread() self.channel.close() self.channel = self.connection.channel() + # work around 'memory' transport bug in 1.1.3 + if self.memory_transport: + self.channel._new_queue('ae.undeliver') self.consumers = [] def declare_consumer(self, consumer_cls, topic, callback): -- cgit From abd6b240b5247a2981e86c1d3161306fb2b4c4c5 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 12:02:14 -0700 Subject: moved key_name per review --- nova/api/openstack/views/servers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index 35821f9a7..cd01c9373 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -79,8 +79,6 @@ class ViewBuilder(object): metadata[item['key']] = str(item['value']) inst_dict['metadata'] = metadata - inst_dict['key_name'] = inst.get('key_name', '') - inst_dict['hostId'] = '' if inst.get('host'): inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest() @@ -190,6 +188,7 @@ class ViewBuilderV11(ViewBuilder): def _build_extra(self, response, inst): self._build_links(response, inst) response['uuid'] = inst['uuid'] + response['key_name'] = inst.get('key_name', '') self._build_config_drive(response, inst) def _build_links(self, response, inst): -- cgit From f687e978a41c78e10e0c371c5486298925b5857f Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 31 Aug 2011 12:44:15 -0700 Subject: add explicit message for NoMoreFloatingIps exception --- nova/api/openstack/contrib/floating_ips.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 99c0d1469..6ce531c8f 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -96,7 +96,8 @@ class FloatingIPController(object): except rpc.RemoteError as ex: # NOTE(tr3buchet) - why does this block exist? 
if ex.exc_type == 'NoMoreFloatingIps': - raise webob.exc.HTTPBadRequest(explanation=ex.message) + msg = _("No more floating ips available.") + raise webob.exc.HTTPBadRequest(explanation=msg) else: raise -- cgit From c9758dd4832c167562baefad5dcc88f2a1a19b73 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 12:51:16 -0700 Subject: fix keypairs stubs --- nova/api/openstack/create_instance_helper.py | 1 + nova/tests/api/openstack/fakes.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index d82cb534f..744353f31 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -189,6 +189,7 @@ class CreateInstanceHelper(object): raise exc.HTTPBadRequest(explanation=msg) except exception.KeypairNotFound as error: msg = _("Invalid key_name provided.") + raise exc.HTTPBadRequest(explanation=msg) except exception.SecurityGroupNotFound as error: raise exc.HTTPBadRequest(explanation=unicode(error)) except RemoteError as err: diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index a095dd90a..a4a90e6da 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -107,11 +107,18 @@ def stub_out_key_pair_funcs(stubs, have_key_pair=True): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] + def one_key_pair(context, user_id, name): + if name == 'key': + return dict(name='key', public_key='public_key') + else: + raise exc.KeypairNotFound(user_id=user_id, name=name) + def no_key_pair(context, user_id): return [] if have_key_pair: stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair) + stubs.Set(nova.db, 'key_pair_get', one_key_pair) else: stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair) -- cgit From a87a0bba9c7b046b36ee80bc033df5499cca35e1 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Wed, 31 Aug 2011 17:05:01 -0400 Subject: adding support for limiting in image service; updating tests with fixture ids and marker support --- nova/image/glance.py | 9 +++++++++ nova/tests/image/test_glance.py | 29 ++++++++++++++++++++++------- 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/nova/image/glance.py b/nova/image/glance.py index b5f52351f..7233eb18d 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -154,6 +154,15 @@ class GlanceImageService(service.BaseImageService): except KeyError: raise exception.ImagePaginationFailed() + try: + kwargs['limit'] = kwargs['limit'] - len(images) + # break if we have reached a provided limit + if kwargs['limit'] <= 0: + return + except KeyError: + # ignore missing limit, just proceed without it + pass + for image in self._fetch_images(fetch_func, **kwargs): yield image diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 0ff508ffa..5df25df37 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -38,7 +38,16 @@ class StubGlanceClient(object): return self.images[image_id] def get_images_detailed(self, filters=None, marker=None, limit=None): - return self.images.itervalues() + images = self.images.values() + if marker is None: + index = 0 + else: + for index, image in enumerate(images): + if image['id'] == marker: + index += 1 + break + # default to a page size of 3 to ensure we flex the pagination code + return images[index:index + 3] def get_image(self, image_id): return self.images[image_id], [] @@ -86,23 +95,23 
@@ class TestGlanceImageServiceProperties(BaseGlanceTest): """Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the properties dict """ - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': {'prop1': 'propvalue1'}}} self.client.images = fixtures image_meta = self.service.show(self.context, 'image1') - expected = {'name': 'image1', 'is_public': True, + expected = {'id': '1', 'name': 'image1', 'is_public': True, 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} self.assertEqual(image_meta, expected) def test_detail_passes_through_to_client(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', 'name': 'image1', 'is_public': True, 'foo': 'bar', 'properties': {'prop1': 'propvalue1'}}} self.client.images = fixtures image_meta = self.service.detail(self.context) - expected = [{'name': 'image1', 'is_public': True, + expected = [{'id': '1', 'name': 'image1', 'is_public': True, 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}] self.assertEqual(image_meta, expected) @@ -166,6 +175,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): def _make_datetime_fixtures(self): fixtures = { 'image1': { + 'id': '1', 'name': 'image1', 'is_public': True, 'created_at': self.NOW_GLANCE_FORMAT, @@ -173,6 +183,7 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): 'deleted_at': self.NOW_GLANCE_FORMAT, }, 'image2': { + 'id': '2', 'name': 'image2', 'is_public': True, 'created_at': self.NOW_GLANCE_OLD_FORMAT, @@ -183,13 +194,17 @@ class TestGetterDateTimeNoneTests(BaseGlanceTest): return fixtures def _make_none_datetime_fixtures(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', + 'name': 'image1', + 'is_public': True, 'updated_at': None, 'deleted_at': None}} return fixtures def _make_blank_datetime_fixtures(self): - fixtures = {'image1': {'name': 'image1', 'is_public': True, + fixtures = {'image1': {'id': '1', + 'name': 'image1', + 'is_public': True, 'updated_at': '', 'deleted_at': ''}} return fixtures -- cgit From 9de8a589b4ee0e007267efe2394b504382e4cdc1 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 14:58:55 -0700 Subject: change to use _get_key_name to retrieve the key --- nova/api/openstack/servers.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f288f2228..53684fa52 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -144,8 +144,15 @@ class Controller(object): except exception.NotFound: raise exc.HTTPNotFound() + def _get_key_name(self, req, body): + """ Get default keypair if not set """ + raise NotImplementedError() + def create(self, req, body): """ Creates a new server for a given user """ + if 'server' in body: + body['server']['key_name'] = self._get_key_name(req, body) + extra_values = None extra_values, instances = self.helper.create_instance( req, body, self.compute_api.create) @@ -564,17 +571,12 @@ class ControllerV10(Controller): raise exc.HTTPNotFound() return webob.Response(status_int=202) - def create(self, req, body): - """ Creates a new server for a given user """ - # note(ja): v1.0 injects the first keypair for the project for testing - if 'server' in body and not 'key_name' in body['server']: - context = req.environ["nova.context"] - keypairs = db.key_pair_get_all_by_user(context.elevated(), - 
context.user_id) - if keypairs: - body['server']['key_name'] = keypairs[0]['name'] - - return super(ControllerV10, self).create(req, body) + def _get_key_name(self, req, body): + context = req.environ["nova.context"] + keypairs = db.key_pair_get_all_by_user(context, + context.user_id) + if keypairs: + return keypairs[0]['name'] def _image_ref_from_req_data(self, data): return data['server']['imageId'] @@ -647,6 +649,10 @@ class ControllerV11(Controller): except exception.NotFound: raise exc.HTTPNotFound() + def _get_key_name(self, req, body): + if 'server' in body: + return body['server'].get('key_name') + def _image_ref_from_req_data(self, data): try: return data['server']['imageRef'] -- cgit From 642c9ceb1bae9fa5ba008cb69c47f449ea173c3a Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 15:24:29 -0700 Subject: expect key_name attribute in 1.1 --- nova/tests/api/openstack/test_servers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index c54bead49..78c872a28 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -3630,6 +3630,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", + "key_name": '', "links": [ { "rel": "self", @@ -3653,6 +3654,7 @@ class ServersViewBuilderV11Test(test.TestCase): "id": 1, "uuid": self.instance['uuid'], "name": "test_server", + "key_name": '', "config_drive": None, "links": [ { -- cgit From bd917feb287a3d0e8f2f9f9c60b716c7f599f4ff Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Wed, 31 Aug 2011 16:13:55 -0700 Subject: remove extra test --- nova/tests/api/openstack/test_server_actions.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 0c5fab41e..b9ef41465 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -244,19 +244,6 @@ class ServerActionsTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 500) - def test_resized_server_has_correct_status(self): - req = self.webreq('/1', 'GET') - - def fake_migration_get(*args): - return {} - - self.stubs.Set(nova.db.api, 'migration_get_by_instance_and_status', - fake_migration_get) - res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 200) - body = json.loads(res.body) - self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM') - def test_confirm_resize_server(self): req = self.webreq('/1/action', 'POST', dict(confirmResize=None)) -- cgit From fdf076a04e001b897d01b2a8c4a9e3c980ea8f94 Mon Sep 17 00:00:00 2001 From: Anthony Young Date: Wed, 31 Aug 2011 21:34:10 -0700 Subject: fix for lp838583 - return instance_id for associated floating_ips, add test --- nova/api/openstack/contrib/floating_ips.py | 6 +++--- .../api/openstack/contrib/test_floating_ips.py | 21 +++++++++++++++++---- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py index 6ce531c8f..d1add8f83 100644 --- a/nova/api/openstack/contrib/floating_ips.py +++ b/nova/api/openstack/contrib/floating_ips.py @@ -36,9 +36,9 @@ def _translate_floating_ip_view(floating_ip): result['fixed_ip'] = floating_ip['fixed_ip']['address'] except (TypeError, KeyError): result['fixed_ip'] = None - if 'instance' 
in floating_ip: - result['instance_id'] = floating_ip['instance']['id'] - else: + try: + result['instance_id'] = floating_ip['fixed_ip']['instance_id'] + except (TypeError, KeyError): result['instance_id'] = None return {'floating_ip': result} diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index fc10f2f6c..642f2b841 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -38,14 +38,13 @@ def network_api_get_floating_ip(self, context, id): def network_api_get_floating_ip_by_ip(self, context, address): return {'id': 1, 'address': '10.10.10.10', - 'fixed_ip': {'address': '11.0.0.1'}} + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}, def network_api_list_floating_ips(self, context): return [{'id': 1, 'address': '10.10.10.10', - 'instance': {'id': 11}, - 'fixed_ip': {'address': '10.0.0.1'}}, + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}}, {'id': 2, 'address': '10.10.10.11'}] @@ -152,7 +151,7 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'instance_id': 11, + response = {'floating_ips': [{'instance_id': 1, 'ip': '10.10.10.10', 'fixed_ip': '10.0.0.1', 'id': 1}, @@ -171,6 +170,20 @@ class FloatingIpTest(test.TestCase): self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') self.assertEqual(res_dict['floating_ip']['instance_id'], None) + def test_show_associated_floating_ip(self): + def get_floating_ip(self, context, id): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': {'address': '10.0.0.1', 'instance_id': 1}} + self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip) + + req = webob.Request.blank('/v1.1/123/os-floating-ips/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['floating_ip']['id'], 1) + self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') + self.assertEqual(res_dict['floating_ip']['instance_id'], 1) + def test_floating_ip_allocate_no_free_ips(self): def fake_call(*args, **kwargs): raise(rpc.RemoteError('NoMoreFloatingIps', '', '')) -- cgit From dd5eeafbfe1013fd9acdb119933cb5bf986706e6 Mon Sep 17 00:00:00 2001 From: Christopher MacGown Date: Thu, 1 Sep 2011 12:05:21 -0700 Subject: Adds test for image.glance.GlanceImageService._is_image_available --- nova/tests/image/test_glance.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 0ff508ffa..81a54346e 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -20,6 +20,7 @@ import datetime import unittest from nova import context +from nova import exception from nova import test from nova.image import glance @@ -96,6 +97,31 @@ class TestGlanceImageServiceProperties(BaseGlanceTest): 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} self.assertEqual(image_meta, expected) + def test_show_raises_when_no_authtoken_in_the_context(self): + fixtures = {'image1': {'name': 'image1', 'is_public': False, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + self.context.auth_token = False + + expected = {'name': 'image1', 'is_public': True, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + self.assertRaises(exception.ImageNotFound, + 
self.service.show, self.context, 'image1') + + def test_show_passes_through_to_client_with_authtoken_in_context(self): + fixtures = {'image1': {'name': 'image1', 'is_public': False, + 'foo': 'bar', + 'properties': {'prop1': 'propvalue1'}}} + self.client.images = fixtures + self.context.auth_token = True + + expected = {'name': 'image1', 'is_public': False, + 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}} + + image_meta = self.service.show(self.context, 'image1') + self.assertEqual(image_meta, expected) + def test_detail_passes_through_to_client(self): fixtures = {'image1': {'name': 'image1', 'is_public': True, 'foo': 'bar', -- cgit From 59be9be68c0fd9b33b72257b8a1eb8c357ce9217 Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Thu, 1 Sep 2011 12:22:32 -0700 Subject: remove extra references to state_description --- bin/nova-manage | 6 +++--- nova/api/ec2/admin.py | 5 ++--- nova/api/openstack/contrib/simple_tenant_usage.py | 2 +- nova/tests/test_libvirt.py | 10 ++++++---- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index c9cf4266d..c3b2c71ce 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -166,7 +166,7 @@ class VpnCommands(object): print address, print vpn['host'], print ec2utils.id_to_ec2_id(vpn['id']), - print vpn['state_description'], + print vpn['vm_state'], print state else: print None @@ -869,7 +869,7 @@ class VmCommands(object): instance['hostname'], instance['host'], instance['instance_type'].name, - instance['state_description'], + instance['vm_state'], instance['launched_at'], instance['image_ref'], instance['kernel_id'], @@ -1223,7 +1223,7 @@ class VsaCommands(object): type=vc['instance_type']['name'], fl_ip=floating_addr, fx_ip=fixed_addr, - stat=vc['state_description'], + stat=vc['vm_state'], host=vc['host'], time=str(vc['created_at'])) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index dfbbc0a2b..75e029509 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,6 @@ Admin API controller, exposed through http via the api worker. 
""" import base64 -import datetime import netaddr import urllib @@ -33,6 +32,7 @@ from nova import log as logging from nova import utils from nova.api.ec2 import ec2utils from nova.auth import manager +from nova.compute import vm_states FLAGS = flags.FLAGS @@ -273,8 +273,7 @@ class AdminController(object): """Get the VPN instance for a project ID.""" for instance in db.instance_get_all_by_project(context, project_id): if (instance['image_id'] == str(FLAGS.vpn_image_id) - and not instance['state_description'] in - ['shutting_down', 'shutdown']): + and not instance['vm_state'] in [vm_states.DELETED]): return instance def start_vpn(self, context, project): diff --git a/nova/api/openstack/contrib/simple_tenant_usage.py b/nova/api/openstack/contrib/simple_tenant_usage.py index 69b38e229..42691a9fa 100644 --- a/nova/api/openstack/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/contrib/simple_tenant_usage.py @@ -116,7 +116,7 @@ class SimpleTenantUsageController(object): if info['ended_at']: info['state'] = 'terminated' else: - info['state'] = instance['state_description'] + info['state'] = instance['vm_state'] now = datetime.utcnow() diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 6a213b4f0..8c6775b29 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -34,6 +34,7 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.compute import power_state +from nova.compute import vm_states from nova.virt.libvirt import connection from nova.virt.libvirt import firewall @@ -674,8 +675,9 @@ class LibvirtConnTestCase(test.TestCase): # Preparing data self.compute = utils.import_object(FLAGS.compute_manager) - instance_dict = {'host': 'fake', 'state': power_state.RUNNING, - 'state_description': 'running'} + instance_dict = {'host': 'fake', + 'power_state': power_state.RUNNING, + 'vm_state': vm_states.ACTIVE} instance_ref = db.instance_create(self.context, self.test_instance) instance_ref = db.instance_update(self.context, instance_ref['id'], instance_dict) @@ -713,8 +715,8 @@ class LibvirtConnTestCase(test.TestCase): self.compute.rollback_live_migration) instance_ref = db.instance_get(self.context, instance_ref['id']) - self.assertTrue(instance_ref['state_description'] == 'running') - self.assertTrue(instance_ref['state'] == power_state.RUNNING) + self.assertTrue(instance_ref['vm_state'] == vm_states.ACTIVE) + self.assertTrue(instance_ref['power_state'] == power_state.RUNNING) volume_ref = db.volume_get(self.context, volume_ref['id']) self.assertTrue(volume_ref['status'] == 'in-use') -- cgit